git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge branches 'i40iw', 'sriov' and 'hfi1' into k.o/for-4.6
author Doug Ledford <dledford@redhat.com>
Mon, 21 Mar 2016 21:32:23 +0000 (17:32 -0400)
committer Doug Ledford <dledford@redhat.com>
Mon, 21 Mar 2016 21:32:23 +0000 (17:32 -0400)
140 files changed:
MAINTAINERS
drivers/infiniband/Kconfig
drivers/infiniband/core/cache.c
drivers/infiniband/core/cma.c
drivers/infiniband/core/device.c
drivers/infiniband/core/fmr_pool.c
drivers/infiniband/core/iwcm.c
drivers/infiniband/core/iwpm_msg.c
drivers/infiniband/core/iwpm_util.c
drivers/infiniband/core/iwpm_util.h
drivers/infiniband/core/packer.c
drivers/infiniband/core/sa_query.c
drivers/infiniband/core/ucm.c
drivers/infiniband/core/ucma.c
drivers/infiniband/core/ud_header.c
drivers/infiniband/core/uverbs_cmd.c
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/core/verbs.c
drivers/infiniband/hw/Makefile
drivers/infiniband/hw/cxgb3/iwch_cm.c
drivers/infiniband/hw/cxgb3/iwch_provider.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/cxgb4/cq.c
drivers/infiniband/hw/cxgb4/device.c
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
drivers/infiniband/hw/cxgb4/mem.c
drivers/infiniband/hw/cxgb4/provider.c
drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/hw/i40iw/Kconfig [new file with mode: 0644]
drivers/infiniband/hw/i40iw/Makefile [new file with mode: 0644]
drivers/infiniband/hw/i40iw/i40iw.h [new file with mode: 0644]
drivers/infiniband/hw/i40iw/i40iw_cm.c [new file with mode: 0644]
drivers/infiniband/hw/i40iw/i40iw_cm.h [new file with mode: 0644]
drivers/infiniband/hw/i40iw/i40iw_ctrl.c [new file with mode: 0644]
drivers/infiniband/hw/i40iw/i40iw_d.h [new file with mode: 0644]
drivers/infiniband/hw/i40iw/i40iw_hmc.c [new file with mode: 0644]
drivers/infiniband/hw/i40iw/i40iw_hmc.h [new file with mode: 0644]
drivers/infiniband/hw/i40iw/i40iw_hw.c [new file with mode: 0644]
drivers/infiniband/hw/i40iw/i40iw_main.c [new file with mode: 0644]
drivers/infiniband/hw/i40iw/i40iw_osdep.h [new file with mode: 0644]
drivers/infiniband/hw/i40iw/i40iw_p.h [new file with mode: 0644]
drivers/infiniband/hw/i40iw/i40iw_pble.c [new file with mode: 0644]
drivers/infiniband/hw/i40iw/i40iw_pble.h [new file with mode: 0644]
drivers/infiniband/hw/i40iw/i40iw_puda.c [new file with mode: 0644]
drivers/infiniband/hw/i40iw/i40iw_puda.h [new file with mode: 0644]
drivers/infiniband/hw/i40iw/i40iw_register.h [new file with mode: 0644]
drivers/infiniband/hw/i40iw/i40iw_status.h [new file with mode: 0644]
drivers/infiniband/hw/i40iw/i40iw_type.h [new file with mode: 0644]
drivers/infiniband/hw/i40iw/i40iw_ucontext.h [new file with mode: 0644]
drivers/infiniband/hw/i40iw/i40iw_uk.c [new file with mode: 0644]
drivers/infiniband/hw/i40iw/i40iw_user.h [new file with mode: 0644]
drivers/infiniband/hw/i40iw/i40iw_utils.c [new file with mode: 0644]
drivers/infiniband/hw/i40iw/i40iw_verbs.c [new file with mode: 0644]
drivers/infiniband/hw/i40iw/i40iw_verbs.h [new file with mode: 0644]
drivers/infiniband/hw/i40iw/i40iw_vf.c [new file with mode: 0644]
drivers/infiniband/hw/i40iw/i40iw_vf.h [new file with mode: 0644]
drivers/infiniband/hw/i40iw/i40iw_virtchnl.c [new file with mode: 0644]
drivers/infiniband/hw/i40iw/i40iw_virtchnl.h [new file with mode: 0644]
drivers/infiniband/hw/mlx4/alias_GUID.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx4/mlx4_ib.h
drivers/infiniband/hw/mlx4/mr.c
drivers/infiniband/hw/mlx5/Makefile
drivers/infiniband/hw/mlx5/cq.c
drivers/infiniband/hw/mlx5/gsi.c [new file with mode: 0644]
drivers/infiniband/hw/mlx5/ib_virt.c [new file with mode: 0644]
drivers/infiniband/hw/mlx5/mad.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/hw/mlx5/odp.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/hw/mlx5/srq.c
drivers/infiniband/hw/mlx5/user.h
drivers/infiniband/hw/nes/Kconfig
drivers/infiniband/hw/nes/nes.c
drivers/infiniband/hw/nes/nes_cm.c
drivers/infiniband/hw/nes/nes_cm.h
drivers/infiniband/hw/nes/nes_hw.c
drivers/infiniband/hw/nes/nes_hw.h
drivers/infiniband/hw/nes/nes_nic.c
drivers/infiniband/hw/nes/nes_verbs.c
drivers/infiniband/hw/ocrdma/ocrdma.h
drivers/infiniband/hw/ocrdma/ocrdma_ah.c
drivers/infiniband/hw/ocrdma/ocrdma_ah.h
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
drivers/infiniband/hw/ocrdma/ocrdma_main.c
drivers/infiniband/hw/ocrdma/ocrdma_sli.h
drivers/infiniband/hw/ocrdma/ocrdma_stats.c
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
drivers/infiniband/ulp/ipoib/ipoib.h
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/infiniband/ulp/ipoib/ipoib_ib.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/ipoib/ipoib_verbs.c
drivers/infiniband/ulp/iser/iscsi_iser.c
drivers/infiniband/ulp/iser/iscsi_iser.h
drivers/infiniband/ulp/iser/iser_initiator.c
drivers/infiniband/ulp/iser/iser_verbs.c
drivers/infiniband/ulp/srp/ib_srp.c
drivers/infiniband/ulp/srpt/ib_srpt.c
drivers/infiniband/ulp/srpt/ib_srpt.h
drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
drivers/net/ethernet/intel/i40e/Makefile
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_client.c [new file with mode: 0644]
drivers/net/ethernet/intel/i40e/i40e_client.h [new file with mode: 0644]
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_type.h
drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
drivers/net/ethernet/mellanox/mlx4/fw.c
drivers/net/ethernet/mellanox/mlx4/mcg.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
drivers/net/ethernet/mellanox/mlx5/core/fw.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/mr.c
drivers/net/ethernet/mellanox/mlx5/core/port.c
drivers/net/ethernet/mellanox/mlx5/core/vport.c
include/linux/mlx4/device.h
include/linux/mlx5/device.h
include/linux/mlx5/driver.h
include/linux/mlx5/fs.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mlx5/qp.h
include/linux/mlx5/vport.h
include/linux/netdevice.h
include/rdma/ib_mad.h
include/rdma/ib_verbs.h
include/rdma/iw_cm.h
include/uapi/linux/if_link.h
include/uapi/rdma/rdma_netlink.h
net/9p/trans_rdma.c
net/core/rtnetlink.c

index 1eb4f8e95960825769a1aead687ae2276006c507..c62557e6893edef161242c4417c2a38d71951263 100644 (file)
@@ -5684,6 +5684,16 @@ F:       Documentation/networking/i40evf.txt
 F:     drivers/net/ethernet/intel/
 F:     drivers/net/ethernet/intel/*/
 
+INTEL RDMA RNIC DRIVER
+M:     Faisal Latif <faisal.latif@intel.com>
+R:     Chien Tin Tung <chien.tin.tung@intel.com>
+R:     Mustafa Ismail <mustafa.ismail@intel.com>
+R:     Shiraz Saleem <shiraz.saleem@intel.com>
+R:     Tatyana Nikolova <tatyana.e.nikolova@intel.com>
+L:     linux-rdma@vger.kernel.org
+S:     Supported
+F:     drivers/infiniband/hw/i40iw/
+
 INTEL-MID GPIO DRIVER
 M:     David Cohen <david.a.cohen@linux.intel.com>
 L:     linux-gpio@vger.kernel.org
index d00d86d34a76caf2bce18098323e1284f894aac5..6425c0e5d18a655a7a50e608f283dd7609ef6e79 100644 (file)
@@ -68,6 +68,7 @@ source "drivers/infiniband/hw/mthca/Kconfig"
 source "drivers/infiniband/hw/qib/Kconfig"
 source "drivers/infiniband/hw/cxgb3/Kconfig"
 source "drivers/infiniband/hw/cxgb4/Kconfig"
+source "drivers/infiniband/hw/i40iw/Kconfig"
 source "drivers/infiniband/hw/mlx4/Kconfig"
 source "drivers/infiniband/hw/mlx5/Kconfig"
 source "drivers/infiniband/hw/nes/Kconfig"
index 53343ffbff7a1302b03ce5f6a66116b06f1e60b5..cb00d59da45616af57a03f29454434ec50fa9826 100644 (file)
@@ -1043,8 +1043,8 @@ static void ib_cache_update(struct ib_device *device,
 
        ret = ib_query_port(device, port, tprops);
        if (ret) {
-               printk(KERN_WARNING "ib_query_port failed (%d) for %s\n",
-                      ret, device->name);
+               pr_warn("ib_query_port failed (%d) for %s\n",
+                       ret, device->name);
                goto err;
        }
 
@@ -1067,8 +1067,8 @@ static void ib_cache_update(struct ib_device *device,
        for (i = 0; i < pkey_cache->table_len; ++i) {
                ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
                if (ret) {
-                       printk(KERN_WARNING "ib_query_pkey failed (%d) for %s (index %d)\n",
-                              ret, device->name, i);
+                       pr_warn("ib_query_pkey failed (%d) for %s (index %d)\n",
+                               ret, device->name, i);
                        goto err;
                }
        }
@@ -1078,8 +1078,8 @@ static void ib_cache_update(struct ib_device *device,
                        ret = ib_query_gid(device, port, i,
                                           gid_cache->table + i, NULL);
                        if (ret) {
-                               printk(KERN_WARNING "ib_query_gid failed (%d) for %s (index %d)\n",
-                                      ret, device->name, i);
+                               pr_warn("ib_query_gid failed (%d) for %s (index %d)\n",
+                                       ret, device->name, i);
                                goto err;
                        }
                }
@@ -1161,8 +1161,7 @@ int ib_cache_setup_one(struct ib_device *device)
                                          GFP_KERNEL);
        if (!device->cache.pkey_cache ||
            !device->cache.lmc_cache) {
-               printk(KERN_WARNING "Couldn't allocate cache "
-                      "for %s\n", device->name);
+               pr_warn("Couldn't allocate cache for %s\n", device->name);
                return -ENOMEM;
        }
 
index 9729639df407abdc382959ef8151fbbcc3370589..93ab0ae9720889f9cf51d6629a72469b67d646d4 100644 (file)
@@ -1206,6 +1206,10 @@ static int cma_save_req_info(const struct ib_cm_event *ib_event,
                req->has_gid    = true;
                req->service_id = req_param->primary_path->service_id;
                req->pkey       = be16_to_cpu(req_param->primary_path->pkey);
+               if (req->pkey != req_param->bth_pkey)
+                       pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n"
+                                           "RDMA CMA: in the future this may cause the request to be dropped\n",
+                                           req_param->bth_pkey, req->pkey);
                break;
        case IB_CM_SIDR_REQ_RECEIVED:
                req->device     = sidr_param->listen_id->device;
@@ -1213,6 +1217,10 @@ static int cma_save_req_info(const struct ib_cm_event *ib_event,
                req->has_gid    = false;
                req->service_id = sidr_param->service_id;
                req->pkey       = sidr_param->pkey;
+               if (req->pkey != sidr_param->bth_pkey)
+                       pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and SIDR request payload P_Key (0x%x)\n"
+                                           "RDMA CMA: in the future this may cause the request to be dropped\n",
+                                           sidr_param->bth_pkey, req->pkey);
                break;
        default:
                return -EINVAL;
@@ -1713,7 +1721,7 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
                event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
                break;
        default:
-               printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
+               pr_err("RDMA CMA: unexpected IB CM event: %d\n",
                       ib_event->event);
                goto out;
        }
@@ -2186,8 +2194,8 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
 
        ret = rdma_listen(id, id_priv->backlog);
        if (ret)
-               printk(KERN_WARNING "RDMA CMA: cma_listen_on_dev, error %d, "
-                      "listening on device %s\n", ret, cma_dev->device->name);
+               pr_warn("RDMA CMA: cma_listen_on_dev, error %d, listening on device %s\n",
+                       ret, cma_dev->device->name);
 }
 
 static void cma_listen_on_all(struct rdma_id_private *id_priv)
@@ -3239,7 +3247,7 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
                event.status = 0;
                break;
        default:
-               printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
+               pr_err("RDMA CMA: unexpected IB CM event: %d\n",
                       ib_event->event);
                goto out;
        }
@@ -4003,8 +4011,8 @@ static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id
        if ((dev_addr->bound_dev_if == ndev->ifindex) &&
            (net_eq(dev_net(ndev), dev_addr->net)) &&
            memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
-               printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n",
-                      ndev->name, &id_priv->id);
+               pr_info("RDMA CM addr change for ndev %s used by id %p\n",
+                       ndev->name, &id_priv->id);
                work = kzalloc(sizeof *work, GFP_KERNEL);
                if (!work)
                        return -ENOMEM;
@@ -4287,7 +4295,7 @@ static int __init cma_init(void)
                goto err;
 
        if (ibnl_add_client(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_NUM_OPS, cma_cb_table))
-               printk(KERN_WARNING "RDMA CMA: failed to add netlink callback\n");
+               pr_warn("RDMA CMA: failed to add netlink callback\n");
        cma_configfs_init();
 
        return 0;
index 00da80e02154205c428ca00702f4b7c10fa5a829..10979844026a01bda540a6f99d2cef0ef56b525f 100644 (file)
@@ -115,8 +115,8 @@ static int ib_device_check_mandatory(struct ib_device *device)
 
        for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
                if (!*(void **) ((void *) device + mandatory_table[i].offset)) {
-                       printk(KERN_WARNING "Device %s is missing mandatory function %s\n",
-                              device->name, mandatory_table[i].name);
+                       pr_warn("Device %s is missing mandatory function %s\n",
+                               device->name, mandatory_table[i].name);
                        return -EINVAL;
                }
        }
@@ -255,8 +255,8 @@ static int add_client_context(struct ib_device *device, struct ib_client *client
 
        context = kmalloc(sizeof *context, GFP_KERNEL);
        if (!context) {
-               printk(KERN_WARNING "Couldn't allocate client context for %s/%s\n",
-                      device->name, client->name);
+               pr_warn("Couldn't allocate client context for %s/%s\n",
+                       device->name, client->name);
                return -ENOMEM;
        }
 
@@ -343,28 +343,29 @@ int ib_register_device(struct ib_device *device,
 
        ret = read_port_immutable(device);
        if (ret) {
-               printk(KERN_WARNING "Couldn't create per port immutable data %s\n",
-                      device->name);
+               pr_warn("Couldn't create per port immutable data %s\n",
+                       device->name);
                goto out;
        }
 
        ret = ib_cache_setup_one(device);
        if (ret) {
-               printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n");
+               pr_warn("Couldn't set up InfiniBand P_Key/GID cache\n");
                goto out;
        }
 
        memset(&device->attrs, 0, sizeof(device->attrs));
        ret = device->query_device(device, &device->attrs, &uhw);
        if (ret) {
-               printk(KERN_WARNING "Couldn't query the device attributes\n");
+               pr_warn("Couldn't query the device attributes\n");
+               ib_cache_cleanup_one(device);
                goto out;
        }
 
        ret = ib_device_register_sysfs(device, port_callback);
        if (ret) {
-               printk(KERN_WARNING "Couldn't register device %s with driver model\n",
-                      device->name);
+               pr_warn("Couldn't register device %s with driver model\n",
+                       device->name);
                ib_cache_cleanup_one(device);
                goto out;
        }
@@ -565,8 +566,8 @@ void ib_set_client_data(struct ib_device *device, struct ib_client *client,
                        goto out;
                }
 
-       printk(KERN_WARNING "No client context found for %s/%s\n",
-              device->name, client->name);
+       pr_warn("No client context found for %s/%s\n",
+               device->name, client->name);
 
 out:
        spin_unlock_irqrestore(&device->client_data_lock, flags);
@@ -649,10 +650,23 @@ int ib_query_port(struct ib_device *device,
                  u8 port_num,
                  struct ib_port_attr *port_attr)
 {
+       union ib_gid gid;
+       int err;
+
        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;
 
-       return device->query_port(device, port_num, port_attr);
+       memset(port_attr, 0, sizeof(*port_attr));
+       err = device->query_port(device, port_num, port_attr);
+       if (err || port_attr->subnet_prefix)
+               return err;
+
+       err = ib_query_gid(device, port_num, 0, &gid, NULL);
+       if (err)
+               return err;
+
+       port_attr->subnet_prefix = be64_to_cpu(gid.global.subnet_prefix);
+       return 0;
 }
 EXPORT_SYMBOL(ib_query_port);
 
@@ -959,13 +973,13 @@ static int __init ib_core_init(void)
 
        ret = class_register(&ib_class);
        if (ret) {
-               printk(KERN_WARNING "Couldn't create InfiniBand device class\n");
+               pr_warn("Couldn't create InfiniBand device class\n");
                goto err_comp;
        }
 
        ret = ibnl_init();
        if (ret) {
-               printk(KERN_WARNING "Couldn't init IB netlink interface\n");
+               pr_warn("Couldn't init IB netlink interface\n");
                goto err_sysfs;
        }
 
index 6ac3683c144ba859ff34709cc292e2fc3208f621..cdbb1f1a6d976f317964efe97155070546e124e3 100644 (file)
@@ -150,8 +150,8 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
 
 #ifdef DEBUG
                if (fmr->ref_count !=0) {
-                       printk(KERN_WARNING PFX "Unmapping FMR 0x%08x with ref count %d\n",
-                              fmr, fmr->ref_count);
+                       pr_warn(PFX "Unmapping FMR 0x%08x with ref count %d\n",
+                               fmr, fmr->ref_count);
                }
 #endif
        }
@@ -167,7 +167,7 @@ static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
 
        ret = ib_unmap_fmr(&fmr_list);
        if (ret)
-               printk(KERN_WARNING PFX "ib_unmap_fmr returned %d\n", ret);
+               pr_warn(PFX "ib_unmap_fmr returned %d\n", ret);
 
        spin_lock_irq(&pool->pool_lock);
        list_splice(&unmap_list, &pool->free_list);
@@ -222,8 +222,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd             *pd,
        device = pd->device;
        if (!device->alloc_fmr    || !device->dealloc_fmr  ||
            !device->map_phys_fmr || !device->unmap_fmr) {
-               printk(KERN_INFO PFX "Device %s does not support FMRs\n",
-                      device->name);
+               pr_info(PFX "Device %s does not support FMRs\n", device->name);
                return ERR_PTR(-ENOSYS);
        }
 
@@ -233,13 +232,10 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd             *pd,
                max_remaps = device->attrs.max_map_per_fmr;
 
        pool = kmalloc(sizeof *pool, GFP_KERNEL);
-       if (!pool) {
-               printk(KERN_WARNING PFX "couldn't allocate pool struct\n");
+       if (!pool)
                return ERR_PTR(-ENOMEM);
-       }
 
        pool->cache_bucket   = NULL;
-
        pool->flush_function = params->flush_function;
        pool->flush_arg      = params->flush_arg;
 
@@ -251,7 +247,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd             *pd,
                        kmalloc(IB_FMR_HASH_SIZE * sizeof *pool->cache_bucket,
                                GFP_KERNEL);
                if (!pool->cache_bucket) {
-                       printk(KERN_WARNING PFX "Failed to allocate cache in pool\n");
+                       pr_warn(PFX "Failed to allocate cache in pool\n");
                        ret = -ENOMEM;
                        goto out_free_pool;
                }
@@ -275,7 +271,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd             *pd,
                                   "ib_fmr(%s)",
                                   device->name);
        if (IS_ERR(pool->thread)) {
-               printk(KERN_WARNING PFX "couldn't start cleanup thread\n");
+               pr_warn(PFX "couldn't start cleanup thread\n");
                ret = PTR_ERR(pool->thread);
                goto out_free_pool;
        }
@@ -294,11 +290,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd             *pd,
 
                for (i = 0; i < params->pool_size; ++i) {
                        fmr = kmalloc(bytes_per_fmr, GFP_KERNEL);
-                       if (!fmr) {
-                               printk(KERN_WARNING PFX "failed to allocate fmr "
-                                      "struct for FMR %d\n", i);
+                       if (!fmr)
                                goto out_fail;
-                       }
 
                        fmr->pool             = pool;
                        fmr->remap_count      = 0;
@@ -307,8 +300,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd             *pd,
 
                        fmr->fmr = ib_alloc_fmr(pd, params->access, &fmr_attr);
                        if (IS_ERR(fmr->fmr)) {
-                               printk(KERN_WARNING PFX "fmr_create failed "
-                                      "for FMR %d\n", i);
+                               pr_warn(PFX "fmr_create failed for FMR %d\n",
+                                       i);
                                kfree(fmr);
                                goto out_fail;
                        }
@@ -363,8 +356,8 @@ void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
        }
 
        if (i < pool->pool_size)
-               printk(KERN_WARNING PFX "pool still has %d regions registered\n",
-                      pool->pool_size - i);
+               pr_warn(PFX "pool still has %d regions registered\n",
+                       pool->pool_size - i);
 
        kfree(pool->cache_bucket);
        kfree(pool);
@@ -463,7 +456,7 @@ struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
                list_add(&fmr->list, &pool->free_list);
                spin_unlock_irqrestore(&pool->pool_lock, flags);
 
-               printk(KERN_WARNING PFX "fmr_map returns %d\n", result);
+               pr_warn(PFX "fmr_map returns %d\n", result);
 
                return ERR_PTR(result);
        }
@@ -517,8 +510,8 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
 
 #ifdef DEBUG
        if (fmr->ref_count < 0)
-               printk(KERN_WARNING PFX "FMR %p has ref count %d < 0\n",
-                      fmr, fmr->ref_count);
+               pr_warn(PFX "FMR %p has ref count %d < 0\n",
+                       fmr, fmr->ref_count);
 #endif
 
        spin_unlock_irqrestore(&pool->pool_lock, flags);
index ff9163dc159614fe76ca7c476ff80b64de9c4e54..e28a160cdab03650441cd0ff48b44e0cffce9a68 100644 (file)
@@ -50,6 +50,8 @@
 
 #include <rdma/iw_cm.h>
 #include <rdma/ib_addr.h>
+#include <rdma/iw_portmap.h>
+#include <rdma/rdma_netlink.h>
 
 #include "iwcm.h"
 
@@ -57,6 +59,16 @@ MODULE_AUTHOR("Tom Tucker");
 MODULE_DESCRIPTION("iWARP CM");
 MODULE_LICENSE("Dual BSD/GPL");
 
+static struct ibnl_client_cbs iwcm_nl_cb_table[] = {
+       [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
+       [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
+       [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
+       [RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb},
+       [RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
+       [RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
+       [RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb}
+};
+
 static struct workqueue_struct *iwcm_wq;
 struct iwcm_work {
        struct work_struct work;
@@ -402,6 +414,11 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 
+       if (cm_id->mapped) {
+               iwpm_remove_mapinfo(&cm_id->local_addr, &cm_id->m_local_addr);
+               iwpm_remove_mapping(&cm_id->local_addr, RDMA_NL_IWCM);
+       }
+
        (void)iwcm_deref_id(cm_id_priv);
 }
 
@@ -426,6 +443,97 @@ void iw_destroy_cm_id(struct iw_cm_id *cm_id)
 }
 EXPORT_SYMBOL(iw_destroy_cm_id);
 
+/**
+ * iw_cm_check_wildcard - If IP address is 0 then use original
+ * @pm_addr: sockaddr containing the ip to check for wildcard
+ * @cm_addr: sockaddr containing the actual IP address
+ * @cm_outaddr: sockaddr to set IP addr which leaving port
+ *
+ *  Checks the pm_addr for wildcard and then sets cm_outaddr's
+ *  IP to the actual (cm_addr).
+ */
+static void iw_cm_check_wildcard(struct sockaddr_storage *pm_addr,
+                                struct sockaddr_storage *cm_addr,
+                                struct sockaddr_storage *cm_outaddr)
+{
+       if (pm_addr->ss_family == AF_INET) {
+               struct sockaddr_in *pm4_addr = (struct sockaddr_in *)pm_addr;
+
+               if (pm4_addr->sin_addr.s_addr == INADDR_ANY) {
+                       struct sockaddr_in *cm4_addr =
+                               (struct sockaddr_in *)cm_addr;
+                       struct sockaddr_in *cm4_outaddr =
+                               (struct sockaddr_in *)cm_outaddr;
+
+                       cm4_outaddr->sin_addr = cm4_addr->sin_addr;
+               }
+       } else {
+               struct sockaddr_in6 *pm6_addr = (struct sockaddr_in6 *)pm_addr;
+
+               if (ipv6_addr_type(&pm6_addr->sin6_addr) == IPV6_ADDR_ANY) {
+                       struct sockaddr_in6 *cm6_addr =
+                               (struct sockaddr_in6 *)cm_addr;
+                       struct sockaddr_in6 *cm6_outaddr =
+                               (struct sockaddr_in6 *)cm_outaddr;
+
+                       cm6_outaddr->sin6_addr = cm6_addr->sin6_addr;
+               }
+       }
+}
+
+/**
+ * iw_cm_map - Use portmapper to map the ports
+ * @cm_id: connection manager pointer
+ * @active: Indicates the active side when true
+ * returns nonzero for error only if iwpm_create_mapinfo() fails
+ *
+ * Tries to add a mapping for a port using the Portmapper. If
+ * successful in mapping the IP/Port it will check the remote
+ * mapped IP address for a wildcard IP address and replace the
+ * zero IP address with the remote_addr.
+ */
+static int iw_cm_map(struct iw_cm_id *cm_id, bool active)
+{
+       struct iwpm_dev_data pm_reg_msg;
+       struct iwpm_sa_data pm_msg;
+       int status;
+
+       cm_id->m_local_addr = cm_id->local_addr;
+       cm_id->m_remote_addr = cm_id->remote_addr;
+
+       memcpy(pm_reg_msg.dev_name, cm_id->device->name,
+              sizeof(pm_reg_msg.dev_name));
+       memcpy(pm_reg_msg.if_name, cm_id->device->iwcm->ifname,
+              sizeof(pm_reg_msg.if_name));
+
+       if (iwpm_register_pid(&pm_reg_msg, RDMA_NL_IWCM) ||
+           !iwpm_valid_pid())
+               return 0;
+
+       cm_id->mapped = true;
+       pm_msg.loc_addr = cm_id->local_addr;
+       pm_msg.rem_addr = cm_id->remote_addr;
+       if (active)
+               status = iwpm_add_and_query_mapping(&pm_msg,
+                                                   RDMA_NL_IWCM);
+       else
+               status = iwpm_add_mapping(&pm_msg, RDMA_NL_IWCM);
+
+       if (!status) {
+               cm_id->m_local_addr = pm_msg.mapped_loc_addr;
+               if (active) {
+                       cm_id->m_remote_addr = pm_msg.mapped_rem_addr;
+                       iw_cm_check_wildcard(&pm_msg.mapped_rem_addr,
+                                            &cm_id->remote_addr,
+                                            &cm_id->m_remote_addr);
+               }
+       }
+
+       return iwpm_create_mapinfo(&cm_id->local_addr,
+                                  &cm_id->m_local_addr,
+                                  RDMA_NL_IWCM);
+}
+
 /*
  * CM_ID <-- LISTEN
  *
@@ -452,7 +560,9 @@ int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
        case IW_CM_STATE_IDLE:
                cm_id_priv->state = IW_CM_STATE_LISTEN;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
-               ret = cm_id->device->iwcm->create_listen(cm_id, backlog);
+               ret = iw_cm_map(cm_id, false);
+               if (!ret)
+                       ret = cm_id->device->iwcm->create_listen(cm_id, backlog);
                if (ret)
                        cm_id_priv->state = IW_CM_STATE_IDLE;
                spin_lock_irqsave(&cm_id_priv->lock, flags);
@@ -582,39 +692,37 @@ int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
        spin_lock_irqsave(&cm_id_priv->lock, flags);
 
        if (cm_id_priv->state != IW_CM_STATE_IDLE) {
-               spin_unlock_irqrestore(&cm_id_priv->lock, flags);
-               clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
-               wake_up_all(&cm_id_priv->connect_wait);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto err;
        }
 
        /* Get the ib_qp given the QPN */
        qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
        if (!qp) {
-               spin_unlock_irqrestore(&cm_id_priv->lock, flags);
-               clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
-               wake_up_all(&cm_id_priv->connect_wait);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto err;
        }
        cm_id->device->iwcm->add_ref(qp);
        cm_id_priv->qp = qp;
        cm_id_priv->state = IW_CM_STATE_CONN_SENT;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 
-       ret = cm_id->device->iwcm->connect(cm_id, iw_param);
-       if (ret) {
-               spin_lock_irqsave(&cm_id_priv->lock, flags);
-               if (cm_id_priv->qp) {
-                       cm_id->device->iwcm->rem_ref(qp);
-                       cm_id_priv->qp = NULL;
-               }
-               spin_unlock_irqrestore(&cm_id_priv->lock, flags);
-               BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
-               cm_id_priv->state = IW_CM_STATE_IDLE;
-               clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
-               wake_up_all(&cm_id_priv->connect_wait);
-       }
+       ret = iw_cm_map(cm_id, true);
+       if (!ret)
+               ret = cm_id->device->iwcm->connect(cm_id, iw_param);
+       if (!ret)
+               return 0;       /* success */
 
+       spin_lock_irqsave(&cm_id_priv->lock, flags);
+       if (cm_id_priv->qp) {
+               cm_id->device->iwcm->rem_ref(qp);
+               cm_id_priv->qp = NULL;
+       }
+       cm_id_priv->state = IW_CM_STATE_IDLE;
+err:
+       spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+       clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
+       wake_up_all(&cm_id_priv->connect_wait);
        return ret;
 }
 EXPORT_SYMBOL(iw_cm_connect);
@@ -656,8 +764,23 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
                goto out;
 
        cm_id->provider_data = iw_event->provider_data;
-       cm_id->local_addr = iw_event->local_addr;
-       cm_id->remote_addr = iw_event->remote_addr;
+       cm_id->m_local_addr = iw_event->local_addr;
+       cm_id->m_remote_addr = iw_event->remote_addr;
+       cm_id->local_addr = listen_id_priv->id.local_addr;
+
+       ret = iwpm_get_remote_info(&listen_id_priv->id.m_local_addr,
+                                  &iw_event->remote_addr,
+                                  &cm_id->remote_addr,
+                                  RDMA_NL_IWCM);
+       if (ret) {
+               cm_id->remote_addr = iw_event->remote_addr;
+       } else {
+               iw_cm_check_wildcard(&listen_id_priv->id.m_local_addr,
+                                    &iw_event->local_addr,
+                                    &cm_id->local_addr);
+               iw_event->local_addr = cm_id->local_addr;
+               iw_event->remote_addr = cm_id->remote_addr;
+       }
 
        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
        cm_id_priv->state = IW_CM_STATE_CONN_RECV;
@@ -753,8 +876,10 @@ static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
        clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
        BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
        if (iw_event->status == 0) {
-               cm_id_priv->id.local_addr = iw_event->local_addr;
-               cm_id_priv->id.remote_addr = iw_event->remote_addr;
+               cm_id_priv->id.m_local_addr = iw_event->local_addr;
+               cm_id_priv->id.m_remote_addr = iw_event->remote_addr;
+               iw_event->local_addr = cm_id_priv->id.local_addr;
+               iw_event->remote_addr = cm_id_priv->id.remote_addr;
                cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
        } else {
                /* REJECTED or RESET */
@@ -1044,6 +1169,17 @@ EXPORT_SYMBOL(iw_cm_init_qp_attr);
 
 static int __init iw_cm_init(void)
 {
+       int ret;
+
+       ret = iwpm_init(RDMA_NL_IWCM);
+       if (ret)
+               pr_err("iw_cm: couldn't init iwpm\n");
+
+       ret = ibnl_add_client(RDMA_NL_IWCM, RDMA_NL_IWPM_NUM_OPS,
+                             iwcm_nl_cb_table);
+       if (ret)
+               pr_err("iw_cm: couldn't register netlink callbacks\n");
+
        iwcm_wq = create_singlethread_workqueue("iw_cm_wq");
        if (!iwcm_wq)
                return -ENOMEM;
@@ -1063,6 +1199,8 @@ static void __exit iw_cm_cleanup(void)
 {
        unregister_net_sysctl_table(iwcm_ctl_table_hdr);
        destroy_workqueue(iwcm_wq);
+       ibnl_remove_client(RDMA_NL_IWCM);
+       iwpm_exit(RDMA_NL_IWCM);
 }
 
 module_init(iw_cm_init);
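
The iwcm.c changes above add the m_local_addr/m_remote_addr fields and fill them via iw_cm_map() before the provider's listen/connect handlers run. Below is a hypothetical provider-side sketch of how those mapped fields are meant to be consumed: the wire connection uses the mapped addresses while local_addr/remote_addr keep what the application asked for. my_driver_connect() and my_start_offload() are made-up names; only struct iw_cm_id and its fields come from the patch itself.

#include <rdma/iw_cm.h>

/* Placeholder for the driver's actual TCP/offload connection setup. */
static int my_start_offload(struct sockaddr_storage *laddr,
			    struct sockaddr_storage *raddr)
{
	return 0;
}

static int my_driver_connect(struct iw_cm_id *cm_id,
			     struct iw_cm_conn_param *param)
{
	/* The core ran iw_cm_map() first, so these hold the port-mapper
	 * results (or copies of local_addr/remote_addr when no mapper
	 * mapping was established). */
	return my_start_offload(&cm_id->m_local_addr, &cm_id->m_remote_addr);
}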
index 22a3abee2a54c0fdce95a4567ef3cc45a20901ad..43e3fa27102b8cd40fb39dc6e4747deed82c0f75 100644 (file)
@@ -88,8 +88,8 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
        ret = ibnl_put_attr(skb, nlh, sizeof(u32), &msg_seq, IWPM_NLA_REG_PID_SEQ);
        if (ret)
                goto pid_query_error;
-       ret = ibnl_put_attr(skb, nlh, IWPM_IFNAME_SIZE,
-                               pm_msg->if_name, IWPM_NLA_REG_IF_NAME);
+       ret = ibnl_put_attr(skb, nlh, IFNAMSIZ,
+                           pm_msg->if_name, IWPM_NLA_REG_IF_NAME);
        if (ret)
                goto pid_query_error;
        ret = ibnl_put_attr(skb, nlh, IWPM_DEVNAME_SIZE,
@@ -394,7 +394,7 @@ register_pid_response_exit:
        /* always for found nlmsg_request */
        kref_put(&nlmsg_request->kref, iwpm_free_nlmsg_request);
        barrier();
-       wake_up(&nlmsg_request->waitq);
+       up(&nlmsg_request->sem);
        return 0;
 }
 EXPORT_SYMBOL(iwpm_register_pid_cb);
@@ -463,7 +463,7 @@ add_mapping_response_exit:
        /* always for found request */
        kref_put(&nlmsg_request->kref, iwpm_free_nlmsg_request);
        barrier();
-       wake_up(&nlmsg_request->waitq);
+       up(&nlmsg_request->sem);
        return 0;
 }
 EXPORT_SYMBOL(iwpm_add_mapping_cb);
@@ -555,7 +555,7 @@ query_mapping_response_exit:
        /* always for found request */
        kref_put(&nlmsg_request->kref, iwpm_free_nlmsg_request);
        barrier();
-       wake_up(&nlmsg_request->waitq);
+       up(&nlmsg_request->sem);
        return 0;
 }
 EXPORT_SYMBOL(iwpm_add_and_query_mapping_cb);
@@ -749,7 +749,7 @@ int iwpm_mapping_error_cb(struct sk_buff *skb, struct netlink_callback *cb)
        /* always for found request */
        kref_put(&nlmsg_request->kref, iwpm_free_nlmsg_request);
        barrier();
-       wake_up(&nlmsg_request->waitq);
+       up(&nlmsg_request->sem);
        return 0;
 }
 EXPORT_SYMBOL(iwpm_mapping_error_cb);
index 5fb089e913530c54a9852d4ae6fabcda24a00451..9b2bf2fb2b00674287e74e7cb3048a5fa296b91e 100644 (file)
@@ -254,9 +254,9 @@ void iwpm_add_remote_info(struct iwpm_remote_info *rem_info)
 }
 
 int iwpm_get_remote_info(struct sockaddr_storage *mapped_loc_addr,
-                               struct sockaddr_storage *mapped_rem_addr,
-                               struct sockaddr_storage *remote_addr,
-                               u8 nl_client)
+                        struct sockaddr_storage *mapped_rem_addr,
+                        struct sockaddr_storage *remote_addr,
+                        u8 nl_client)
 {
        struct hlist_node *tmp_hlist_node;
        struct hlist_head *hash_bucket_head;
@@ -322,6 +322,8 @@ struct iwpm_nlmsg_request *iwpm_get_nlmsg_request(__u32 nlmsg_seq,
        nlmsg_request->nl_client = nl_client;
        nlmsg_request->request_done = 0;
        nlmsg_request->err_code = 0;
+       sema_init(&nlmsg_request->sem, 1);
+       down(&nlmsg_request->sem);
        return nlmsg_request;
 }
 
@@ -364,11 +366,9 @@ struct iwpm_nlmsg_request *iwpm_find_nlmsg_request(__u32 echo_seq)
 int iwpm_wait_complete_req(struct iwpm_nlmsg_request *nlmsg_request)
 {
        int ret;
-       init_waitqueue_head(&nlmsg_request->waitq);
 
-       ret = wait_event_timeout(nlmsg_request->waitq,
-                       (nlmsg_request->request_done != 0), IWPM_NL_TIMEOUT);
-       if (!ret) {
+       ret = down_timeout(&nlmsg_request->sem, IWPM_NL_TIMEOUT);
+       if (ret) {
                ret = -EINVAL;
                pr_info("%s: Timeout %d sec for netlink request (seq = %u)\n",
                        __func__, (IWPM_NL_TIMEOUT/HZ), nlmsg_request->nlmsg_seq);
index b7b9e194ce81fd8f2c7598bb2a6b58cab4fad6b8..af1fc14a0d3d219b0d8ebd9097ece6329f8d5371 100644 (file)
@@ -69,7 +69,7 @@ struct iwpm_nlmsg_request {
        u8                  nl_client;
        u8                  request_done;
        u16                 err_code;
-       wait_queue_head_t   waitq;
+       struct semaphore    sem;
        struct kref         kref;
 };
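
The iwpm changes above swap the wait_queue_head_t/wait_event_timeout() pairing for a semaphore that is taken immediately after sema_init(), so the requester blocks in down_timeout() until the netlink response callback calls up(). A minimal, self-contained sketch of that pattern follows; struct my_request and the function names are hypothetical, only sema_init()/down()/down_timeout()/up() are the real kernel primitives.

#include <linux/semaphore.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

struct my_request {
	struct semaphore sem;
	int err_code;
};

static void my_request_init(struct my_request *req)
{
	/* Start the count at 1 and take it right away, so the first
	 * down_timeout() blocks until the response handler ups it. */
	sema_init(&req->sem, 1);
	down(&req->sem);
	req->err_code = 0;
}

/* Netlink response handler: record the result and release the waiter. */
static void my_response_cb(struct my_request *req, int err)
{
	req->err_code = err;
	up(&req->sem);
}

/* Requester side, called after the netlink message has been sent. */
static int my_wait_for_reply(struct my_request *req)
{
	if (down_timeout(&req->sem, 10 * HZ))	/* ~10 seconds */
		return -ETIME;			/* no reply in time */
	return req->err_code;
}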
 
index 1b65986c0be3daaf5a437b3f6c3b773f7a26c329..19b1ee3279b4a437729636e929338130c04858aa 100644 (file)
@@ -44,7 +44,7 @@ static u64 value_read(int offset, int size, void *structure)
        case 4: return be32_to_cpup((__be32 *) (structure + offset));
        case 8: return be64_to_cpup((__be64 *) (structure + offset));
        default:
-               printk(KERN_WARNING "Field size %d bits not handled\n", size * 8);
+               pr_warn("Field size %d bits not handled\n", size * 8);
                return 0;
        }
 }
@@ -104,9 +104,8 @@ void ib_pack(const struct ib_field        *desc,
                } else {
                        if (desc[i].offset_bits % 8 ||
                            desc[i].size_bits   % 8) {
-                               printk(KERN_WARNING "Structure field %s of size %d "
-                                      "bits is not byte-aligned\n",
-                                      desc[i].field_name, desc[i].size_bits);
+                               pr_warn("Structure field %s of size %d bits is not byte-aligned\n",
+                                       desc[i].field_name, desc[i].size_bits);
                        }
 
                        if (desc[i].struct_size_bytes)
@@ -132,7 +131,7 @@ static void value_write(int offset, int size, u64 val, void *structure)
        case 32: *(__be32 *) (structure + offset) = cpu_to_be32(val); break;
        case 64: *(__be64 *) (structure + offset) = cpu_to_be64(val); break;
        default:
-               printk(KERN_WARNING "Field size %d bits not handled\n", size * 8);
+               pr_warn("Field size %d bits not handled\n", size * 8);
        }
 }
 
@@ -188,9 +187,8 @@ void ib_unpack(const struct ib_field        *desc,
                } else {
                        if (desc[i].offset_bits % 8 ||
                            desc[i].size_bits   % 8) {
-                               printk(KERN_WARNING "Structure field %s of size %d "
-                                      "bits is not byte-aligned\n",
-                                      desc[i].field_name, desc[i].size_bits);
+                               pr_warn("Structure field %s of size %d bits is not byte-aligned\n",
+                                       desc[i].field_name, desc[i].size_bits);
                        }
 
                        memcpy(structure + desc[i].struct_offset_bytes,
index f334090bb6129bf7f3cfe788e5cff7bc762752c3..d2214a55ac4ac4bab9aa4f9c7540745f96ab02ec 100644 (file)
@@ -864,13 +864,12 @@ static void update_sm_ah(struct work_struct *work)
        struct ib_ah_attr   ah_attr;
 
        if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
-               printk(KERN_WARNING "Couldn't query port\n");
+               pr_warn("Couldn't query port\n");
                return;
        }
 
        new_ah = kmalloc(sizeof *new_ah, GFP_KERNEL);
        if (!new_ah) {
-               printk(KERN_WARNING "Couldn't allocate new SM AH\n");
                return;
        }
 
@@ -880,16 +879,21 @@ static void update_sm_ah(struct work_struct *work)
        new_ah->pkey_index = 0;
        if (ib_find_pkey(port->agent->device, port->port_num,
                         IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
-               printk(KERN_ERR "Couldn't find index for default PKey\n");
+               pr_err("Couldn't find index for default PKey\n");
 
        memset(&ah_attr, 0, sizeof ah_attr);
        ah_attr.dlid     = port_attr.sm_lid;
        ah_attr.sl       = port_attr.sm_sl;
        ah_attr.port_num = port->port_num;
+       if (port_attr.grh_required) {
+               ah_attr.ah_flags = IB_AH_GRH;
+               ah_attr.grh.dgid.global.subnet_prefix = cpu_to_be64(port_attr.subnet_prefix);
+               ah_attr.grh.dgid.global.interface_id = cpu_to_be64(IB_SA_WELL_KNOWN_GUID);
+       }
 
        new_ah->ah = ib_create_ah(port->agent->qp->pd, &ah_attr);
        if (IS_ERR(new_ah->ah)) {
-               printk(KERN_WARNING "Couldn't create new SM AH\n");
+               pr_warn("Couldn't create new SM AH\n");
                kfree(new_ah);
                return;
        }
@@ -1221,7 +1225,7 @@ static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
                rec.net = NULL;
                rec.ifindex = 0;
                rec.gid_type = IB_GID_TYPE_IB;
-               memset(rec.dmac, 0, ETH_ALEN);
+               eth_zero_addr(rec.dmac);
                query->callback(status, &rec, query->context);
        } else
                query->callback(status, NULL, query->context);
@@ -1800,13 +1804,13 @@ static int __init ib_sa_init(void)
 
        ret = ib_register_client(&sa_client);
        if (ret) {
-               printk(KERN_ERR "Couldn't register ib_sa client\n");
+               pr_err("Couldn't register ib_sa client\n");
                goto err1;
        }
 
        ret = mcast_init();
        if (ret) {
-               printk(KERN_ERR "Couldn't initialize multicast handling\n");
+               pr_err("Couldn't initialize multicast handling\n");
                goto err2;
        }
 
index 6b4e8a008bc0418f3ad54728b73e03e6e8d6e1fa..4a9aa0433b07f46b67412b3cf70c9e1bc63c4a45 100644 (file)
@@ -1234,7 +1234,7 @@ static int find_overflow_devnum(void)
                ret = alloc_chrdev_region(&overflow_maj, 0, IB_UCM_MAX_DEVICES,
                                          "infiniband_cm");
                if (ret) {
-                       printk(KERN_ERR "ucm: couldn't register dynamic device number\n");
+                       pr_err("ucm: couldn't register dynamic device number\n");
                        return ret;
                }
        }
@@ -1329,19 +1329,19 @@ static int __init ib_ucm_init(void)
        ret = register_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES,
                                     "infiniband_cm");
        if (ret) {
-               printk(KERN_ERR "ucm: couldn't register device number\n");
+               pr_err("ucm: couldn't register device number\n");
                goto error1;
        }
 
        ret = class_create_file(&cm_class, &class_attr_abi_version.attr);
        if (ret) {
-               printk(KERN_ERR "ucm: couldn't create abi_version attribute\n");
+               pr_err("ucm: couldn't create abi_version attribute\n");
                goto error2;
        }
 
        ret = ib_register_client(&ucm_client);
        if (ret) {
-               printk(KERN_ERR "ucm: couldn't register client\n");
+               pr_err("ucm: couldn't register client\n");
                goto error3;
        }
        return 0;
index 8b5a934e1133d80b42e12d1790b3672d73e4a779..dd3bcceadfdef2d277109e0ecfeb334052c1efa4 100644 (file)
@@ -314,7 +314,7 @@ static void ucma_removal_event_handler(struct rdma_cm_id *cm_id)
                }
        }
        if (!event_found)
-               printk(KERN_ERR "ucma_removal_event_handler: warning: connect request event wasn't found\n");
+               pr_err("ucma_removal_event_handler: warning: connect request event wasn't found\n");
 }
 
 static int ucma_event_handler(struct rdma_cm_id *cm_id,
@@ -1716,13 +1716,13 @@ static int __init ucma_init(void)
 
        ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
        if (ret) {
-               printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n");
+               pr_err("rdma_ucm: couldn't create abi_version attr\n");
                goto err1;
        }
 
        ucma_ctl_table_hdr = register_net_sysctl(&init_net, "net/rdma_ucm", ucma_ctl_table);
        if (!ucma_ctl_table_hdr) {
-               printk(KERN_ERR "rdma_ucm: couldn't register sysctl paths\n");
+               pr_err("rdma_ucm: couldn't register sysctl paths\n");
                ret = -ENOMEM;
                goto err2;
        }
index 2116132568e70e61c8465ffc9d922d732aca28ed..29a45d2f8898e1fde29667ff3ffb81c018b2246c 100644 (file)
@@ -479,8 +479,8 @@ int ib_ud_header_unpack(void                *buf,
        buf += IB_LRH_BYTES;
 
        if (header->lrh.link_version != 0) {
-               printk(KERN_WARNING "Invalid LRH.link_version %d\n",
-                      header->lrh.link_version);
+               pr_warn("Invalid LRH.link_version %d\n",
+                       header->lrh.link_version);
                return -EINVAL;
        }
 
@@ -496,20 +496,20 @@ int ib_ud_header_unpack(void                *buf,
                buf += IB_GRH_BYTES;
 
                if (header->grh.ip_version != 6) {
-                       printk(KERN_WARNING "Invalid GRH.ip_version %d\n",
-                              header->grh.ip_version);
+                       pr_warn("Invalid GRH.ip_version %d\n",
+                               header->grh.ip_version);
                        return -EINVAL;
                }
                if (header->grh.next_header != 0x1b) {
-                       printk(KERN_WARNING "Invalid GRH.next_header 0x%02x\n",
-                              header->grh.next_header);
+                       pr_warn("Invalid GRH.next_header 0x%02x\n",
+                               header->grh.next_header);
                        return -EINVAL;
                }
                break;
 
        default:
-               printk(KERN_WARNING "Invalid LRH.link_next_header %d\n",
-                      header->lrh.link_next_header);
+               pr_warn("Invalid LRH.link_next_header %d\n",
+                       header->lrh.link_next_header);
                return -EINVAL;
        }
 
@@ -525,14 +525,13 @@ int ib_ud_header_unpack(void                *buf,
                header->immediate_present = 1;
                break;
        default:
-               printk(KERN_WARNING "Invalid BTH.opcode 0x%02x\n",
-                      header->bth.opcode);
+               pr_warn("Invalid BTH.opcode 0x%02x\n", header->bth.opcode);
                return -EINVAL;
        }
 
        if (header->bth.transport_header_version != 0) {
-               printk(KERN_WARNING "Invalid BTH.transport_header_version %d\n",
-                      header->bth.transport_header_version);
+               pr_warn("Invalid BTH.transport_header_version %d\n",
+                       header->bth.transport_header_version);
                return -EINVAL;
        }
 
index 6ffc9c4e93afb4efa27fe6f3c17fc3cdf41d8c21..6fdc7ecdaca0c3df64e471e9d7b4b3f8242f00ff 100644 (file)
@@ -402,7 +402,7 @@ static void copy_query_dev_fields(struct ib_uverbs_file *file,
        resp->hw_ver            = attr->hw_ver;
        resp->max_qp            = attr->max_qp;
        resp->max_qp_wr         = attr->max_qp_wr;
-       resp->device_cap_flags  = attr->device_cap_flags;
+       resp->device_cap_flags  = lower_32_bits(attr->device_cap_flags);
        resp->max_sge           = attr->max_sge;
        resp->max_sge_rd        = attr->max_sge_rd;
        resp->max_cq            = attr->max_cq;
@@ -1174,6 +1174,7 @@ ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
        struct ib_uobject             *uobj;
        struct ib_pd                  *pd;
        struct ib_mw                  *mw;
+       struct ib_udata                udata;
        int                            ret;
 
        if (out_len < sizeof(resp))
@@ -1195,7 +1196,12 @@ ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
                goto err_free;
        }
 
-       mw = pd->device->alloc_mw(pd, cmd.mw_type);
+       INIT_UDATA(&udata, buf + sizeof(cmd),
+                  (unsigned long)cmd.response + sizeof(resp),
+                  in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
+                  out_len - sizeof(resp));
+
+       mw = pd->device->alloc_mw(pd, cmd.mw_type, &udata);
        if (IS_ERR(mw)) {
                ret = PTR_ERR(mw);
                goto err_put;
@@ -1970,7 +1976,8 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
                   resp_size);
        INIT_UDATA(&uhw, buf + sizeof(cmd),
                   (unsigned long)cmd.response + resp_size,
-                  in_len - sizeof(cmd), out_len - resp_size);
+                  in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
+                  out_len - resp_size);
 
        memset(&cmd_ex, 0, sizeof(cmd_ex));
        cmd_ex.user_handle = cmd.user_handle;
@@ -3085,6 +3092,14 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
             !capable(CAP_NET_ADMIN)) || !capable(CAP_NET_RAW))
                return -EPERM;
 
+       if (cmd.flow_attr.flags >= IB_FLOW_ATTR_FLAGS_RESERVED)
+               return -EINVAL;
+
+       if ((cmd.flow_attr.flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
+           ((cmd.flow_attr.type == IB_FLOW_ATTR_ALL_DEFAULT) ||
+            (cmd.flow_attr.type == IB_FLOW_ATTR_MC_DEFAULT)))
+               return -EINVAL;
+
        if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
                return -EINVAL;
 
@@ -3413,7 +3428,8 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
 
        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
-                  in_len - sizeof cmd, out_len - sizeof resp);
+                  in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr),
+                  out_len - sizeof resp);
 
        ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata);
        if (ret)
@@ -3439,7 +3455,8 @@ ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
 
        INIT_UDATA(&udata, buf + sizeof cmd,
                   (unsigned long) cmd.response + sizeof resp,
-                  in_len - sizeof cmd, out_len - sizeof resp);
+                  in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr),
+                  out_len - sizeof resp);
 
        ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata);
        if (ret)
@@ -3583,9 +3600,9 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
                              struct ib_udata *ucore,
                              struct ib_udata *uhw)
 {
-       struct ib_uverbs_ex_query_device_resp resp;
+       struct ib_uverbs_ex_query_device_resp resp = { {0} };
        struct ib_uverbs_ex_query_device  cmd;
-       struct ib_device_attr attr;
+       struct ib_device_attr attr = {0};
        int err;
 
        if (ucore->inlen < sizeof(cmd))
@@ -3606,14 +3623,11 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
        if (ucore->outlen < resp.response_length)
                return -ENOSPC;
 
-       memset(&attr, 0, sizeof(attr));
-
        err = ib_dev->query_device(ib_dev, &attr, uhw);
        if (err)
                return err;
 
        copy_query_dev_fields(file, ib_dev, &resp.base, &attr);
-       resp.comp_mask = 0;
 
        if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
                goto end;
@@ -3626,9 +3640,6 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
                attr.odp_caps.per_transport_caps.uc_odp_caps;
        resp.odp_caps.per_transport_caps.ud_odp_caps =
                attr.odp_caps.per_transport_caps.ud_odp_caps;
-       resp.odp_caps.reserved = 0;
-#else
-       memset(&resp.odp_caps, 0, sizeof(resp.odp_caps));
 #endif
        resp.response_length += sizeof(resp.odp_caps);
 
@@ -3646,8 +3657,5 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
 
 end:
        err = ib_copy_to_udata(ucore, &resp, resp.response_length);
-       if (err)
-               return err;
-
-       return 0;
+       return err;
 }
index 39680aed99dd0d4593f91317e666393c562d33d3..28ba2cc815355e332dd9877178f38e76784cbcc6 100644 (file)
@@ -683,12 +683,28 @@ out:
        return ev_file;
 }
 
+static int verify_command_mask(struct ib_device *ib_dev, __u32 command)
+{
+       u64 mask;
+
+       if (command <= IB_USER_VERBS_CMD_OPEN_QP)
+               mask = ib_dev->uverbs_cmd_mask;
+       else
+               mask = ib_dev->uverbs_ex_cmd_mask;
+
+       if (mask & ((u64)1 << command))
+               return 0;
+
+       return -1;
+}
+
 static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
                             size_t count, loff_t *pos)
 {
        struct ib_uverbs_file *file = filp->private_data;
        struct ib_device *ib_dev;
        struct ib_uverbs_cmd_hdr hdr;
+       __u32 command;
        __u32 flags;
        int srcu_key;
        ssize_t ret;
@@ -707,37 +723,34 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
                goto out;
        }
 
-       flags = (hdr.command &
-                IB_USER_VERBS_CMD_FLAGS_MASK) >> IB_USER_VERBS_CMD_FLAGS_SHIFT;
+       if (hdr.command & ~(__u32)(IB_USER_VERBS_CMD_FLAGS_MASK |
+                                  IB_USER_VERBS_CMD_COMMAND_MASK)) {
+               ret = -EINVAL;
+               goto out;
+       }
 
-       if (!flags) {
-               __u32 command;
+       command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK;
+       if (verify_command_mask(ib_dev, command)) {
+               ret = -EOPNOTSUPP;
+               goto out;
+       }
 
-               if (hdr.command & ~(__u32)(IB_USER_VERBS_CMD_FLAGS_MASK |
-                                          IB_USER_VERBS_CMD_COMMAND_MASK)) {
-                       ret = -EINVAL;
-                       goto out;
-               }
+       if (!file->ucontext &&
+           command != IB_USER_VERBS_CMD_GET_CONTEXT) {
+               ret = -EINVAL;
+               goto out;
+       }
 
-               command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK;
+       flags = (hdr.command &
+                IB_USER_VERBS_CMD_FLAGS_MASK) >> IB_USER_VERBS_CMD_FLAGS_SHIFT;
 
+       if (!flags) {
                if (command >= ARRAY_SIZE(uverbs_cmd_table) ||
                    !uverbs_cmd_table[command]) {
                        ret = -EINVAL;
                        goto out;
                }
 
-               if (!file->ucontext &&
-                   command != IB_USER_VERBS_CMD_GET_CONTEXT) {
-                       ret = -EINVAL;
-                       goto out;
-               }
-
-               if (!(ib_dev->uverbs_cmd_mask & (1ull << command))) {
-                       ret = -ENOSYS;
-                       goto out;
-               }
-
                if (hdr.in_words * 4 != count) {
                        ret = -EINVAL;
                        goto out;
@@ -749,21 +762,11 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
                                                 hdr.out_words * 4);
 
        } else if (flags == IB_USER_VERBS_CMD_FLAG_EXTENDED) {
-               __u32 command;
-
                struct ib_uverbs_ex_cmd_hdr ex_hdr;
                struct ib_udata ucore;
                struct ib_udata uhw;
                size_t written_count = count;
 
-               if (hdr.command & ~(__u32)(IB_USER_VERBS_CMD_FLAGS_MASK |
-                                          IB_USER_VERBS_CMD_COMMAND_MASK)) {
-                       ret = -EINVAL;
-                       goto out;
-               }
-
-               command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK;
-
                if (command >= ARRAY_SIZE(uverbs_ex_cmd_table) ||
                    !uverbs_ex_cmd_table[command]) {
                        ret = -ENOSYS;
@@ -775,11 +778,6 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
                        goto out;
                }
 
-               if (!(ib_dev->uverbs_ex_cmd_mask & (1ull << command))) {
-                       ret = -ENOSYS;
-                       goto out;
-               }
-
                if (count < (sizeof(hdr) + sizeof(ex_hdr))) {
                        ret = -EINVAL;
                        goto out;
@@ -1058,7 +1056,7 @@ static int find_overflow_devnum(void)
                ret = alloc_chrdev_region(&overflow_maj, 0, IB_UVERBS_MAX_DEVICES,
                                          "infiniband_verbs");
                if (ret) {
-                       printk(KERN_ERR "user_verbs: couldn't register dynamic device number\n");
+                       pr_err("user_verbs: couldn't register dynamic device number\n");
                        return ret;
                }
        }
@@ -1279,14 +1277,14 @@ static int __init ib_uverbs_init(void)
        ret = register_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES,
                                     "infiniband_verbs");
        if (ret) {
-               printk(KERN_ERR "user_verbs: couldn't register device number\n");
+               pr_err("user_verbs: couldn't register device number\n");
                goto out;
        }
 
        uverbs_class = class_create(THIS_MODULE, "infiniband_verbs");
        if (IS_ERR(uverbs_class)) {
                ret = PTR_ERR(uverbs_class);
-               printk(KERN_ERR "user_verbs: couldn't create class infiniband_verbs\n");
+               pr_err("user_verbs: couldn't create class infiniband_verbs\n");
                goto out_chrdev;
        }
 
@@ -1294,13 +1292,13 @@ static int __init ib_uverbs_init(void)
 
        ret = class_create_file(uverbs_class, &class_attr_abi_version.attr);
        if (ret) {
-               printk(KERN_ERR "user_verbs: couldn't create abi_version attribute\n");
+               pr_err("user_verbs: couldn't create abi_version attribute\n");
                goto out_class;
        }
 
        ret = ib_register_client(&uverbs_client);
        if (ret) {
-               printk(KERN_ERR "user_verbs: couldn't register client\n");
+               pr_err("user_verbs: couldn't register client\n");
                goto out_class;
        }
 
index 5af6d024e0538a9eb63049ef7009ad7dab09a967..15b8adbf39c0f46fcf25726a5eefc6cc24f2d046 100644 (file)
@@ -1551,6 +1551,46 @@ int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
 }
 EXPORT_SYMBOL(ib_check_mr_status);
 
+int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
+                        int state)
+{
+       if (!device->set_vf_link_state)
+               return -ENOSYS;
+
+       return device->set_vf_link_state(device, vf, port, state);
+}
+EXPORT_SYMBOL(ib_set_vf_link_state);
+
+int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
+                    struct ifla_vf_info *info)
+{
+       if (!device->get_vf_config)
+               return -ENOSYS;
+
+       return device->get_vf_config(device, vf, port, info);
+}
+EXPORT_SYMBOL(ib_get_vf_config);
+
+int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
+                   struct ifla_vf_stats *stats)
+{
+       if (!device->get_vf_stats)
+               return -ENOSYS;
+
+       return device->get_vf_stats(device, vf, port, stats);
+}
+EXPORT_SYMBOL(ib_get_vf_stats);
+
+int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
+                  int type)
+{
+       if (!device->set_vf_guid)
+               return -ENOSYS;
+
+       return device->set_vf_guid(device, vf, port, guid, type);
+}
+EXPORT_SYMBOL(ib_set_vf_guid);
+
 /**
  * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
  *     and set it in the memory region.
@@ -1567,6 +1607,8 @@ EXPORT_SYMBOL(ib_check_mr_status);
  * - The last sg element is allowed to have length less than page_size.
  * - If sg_nents total byte length exceeds the mr max_num_sge * page_size
  *   then only max_num_sg entries will be mapped.
+ * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS_REG, none of these
+ *   constraints apply and the page_size argument is ignored.
  *
  * Returns the number of sg elements that were mapped to the memory region.
  *
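For clarity, here is a minimal sketch of how a consumer might call ib_map_mr_sg() under the constraints documented above. It assumes the 4.6-era four-argument signature ib_map_mr_sg(mr, sg, sg_nents, page_size); the wrapper name my_map_sg_to_mr() and its error-handling policy are illustrative assumptions, not part of this patch.

#include <rdma/ib_verbs.h>

/*
 * Illustrative only: map a DMA-mapped SG list into an MR obtained from
 * ib_alloc_mr().  Assumes the four-argument ib_map_mr_sg() of this series;
 * for an IB_MR_TYPE_SG_GAPS_REG MR the page_size argument is ignored.
 */
static int my_map_sg_to_mr(struct ib_mr *mr, struct scatterlist *sg,
			   int sg_nents)
{
	int n = ib_map_mr_sg(mr, sg, sg_nents, PAGE_SIZE);

	if (n < 0)
		return n;		/* provider returned an error */
	if (n < sg_nents)
		return -EINVAL;		/* SG list did not fit into the MR */

	/* next step: post an IB_WR_REG_MR work request to register the MR */
	return 0;
}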
@@ -1657,3 +1699,167 @@ next_page:
        return i;
 }
 EXPORT_SYMBOL(ib_sg_to_pages);
+
+struct ib_drain_cqe {
+       struct ib_cqe cqe;
+       struct completion done;
+};
+
+static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+       struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
+                                               cqe);
+
+       complete(&cqe->done);
+}
+
+/*
+ * Post a WR on the SQ and block until its completion is reaped.
+ */
+static void __ib_drain_sq(struct ib_qp *qp)
+{
+       struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
+       struct ib_drain_cqe sdrain;
+       struct ib_send_wr swr = {}, *bad_swr;
+       int ret;
+
+       if (qp->send_cq->poll_ctx == IB_POLL_DIRECT) {
+               WARN_ONCE(qp->send_cq->poll_ctx == IB_POLL_DIRECT,
+                         "IB_POLL_DIRECT poll_ctx not supported for drain\n");
+               return;
+       }
+
+       swr.wr_cqe = &sdrain.cqe;
+       sdrain.cqe.done = ib_drain_qp_done;
+       init_completion(&sdrain.done);
+
+       ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+       if (ret) {
+               WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
+               return;
+       }
+
+       ret = ib_post_send(qp, &swr, &bad_swr);
+       if (ret) {
+               WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
+               return;
+       }
+
+       wait_for_completion(&sdrain.done);
+}
+
+/*
+ * Post a WR on the RQ and block until its completion is reaped.
+ */
+static void __ib_drain_rq(struct ib_qp *qp)
+{
+       struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
+       struct ib_drain_cqe rdrain;
+       struct ib_recv_wr rwr = {}, *bad_rwr;
+       int ret;
+
+       if (qp->recv_cq->poll_ctx == IB_POLL_DIRECT) {
+               WARN_ONCE(qp->recv_cq->poll_ctx == IB_POLL_DIRECT,
+                         "IB_POLL_DIRECT poll_ctx not supported for drain\n");
+               return;
+       }
+
+       rwr.wr_cqe = &rdrain.cqe;
+       rdrain.cqe.done = ib_drain_qp_done;
+       init_completion(&rdrain.done);
+
+       ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+       if (ret) {
+               WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
+               return;
+       }
+
+       ret = ib_post_recv(qp, &rwr, &bad_rwr);
+       if (ret) {
+               WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
+               return;
+       }
+
+       wait_for_completion(&rdrain.done);
+}
+
+/**
+ * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
+ *                application.
+ * @qp:            queue pair to drain
+ *
+ * If the device has a provider-specific drain function, then
+ * call that.  Otherwise call the generic drain function
+ * __ib_drain_sq().
+ *
+ * The caller must:
+ *
+ * ensure there is room in the CQ and SQ for the drain work request and
+ * completion.
+ *
+ * allocate the CQ using ib_alloc_cq() with a poll context other than
+ * IB_POLL_DIRECT.
+ *
+ * ensure that there are no other contexts that are posting WRs concurrently.
+ * Otherwise the drain is not guaranteed.
+ */
+void ib_drain_sq(struct ib_qp *qp)
+{
+       if (qp->device->drain_sq)
+               qp->device->drain_sq(qp);
+       else
+               __ib_drain_sq(qp);
+}
+EXPORT_SYMBOL(ib_drain_sq);
+
+/**
+ * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
+ *                application.
+ * @qp:            queue pair to drain
+ *
+ * If the device has a provider-specific drain function, then
+ * call that.  Otherwise call the generic drain function
+ * __ib_drain_rq().
+ *
+ * The caller must:
+ *
+ * ensure there is room in the CQ and RQ for the drain work request and
+ * completion.
+ *
+ * allocate the CQ using ib_alloc_cq() with a poll context other than
+ * IB_POLL_DIRECT.
+ *
+ * ensure that there are no other contexts that are posting WRs concurrently.
+ * Otherwise the drain is not guaranteed.
+ */
+void ib_drain_rq(struct ib_qp *qp)
+{
+       if (qp->device->drain_rq)
+               qp->device->drain_rq(qp);
+       else
+               __ib_drain_rq(qp);
+}
+EXPORT_SYMBOL(ib_drain_rq);
+
+/**
+ * ib_drain_qp() - Block until all CQEs have been consumed by the
+ *                application on both the RQ and SQ.
+ * @qp:            queue pair to drain
+ *
+ * The caller must:
+ *
+ * ensure there is room in the CQ(s), SQ, and RQ for drain work requests
+ * and completions.
+ *
+ * allocate the CQs using ib_alloc_cq() with a poll context other than
+ * IB_POLL_DIRECT.
+ *
+ * ensure that there are no other contexts that are posting WRs concurrently.
+ * Otherwise the drain is not guaranteed.
+ */
+void ib_drain_qp(struct ib_qp *qp)
+{
+       ib_drain_sq(qp);
+       ib_drain_rq(qp);
+}
+EXPORT_SYMBOL(ib_drain_qp);
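As a usage note, a minimal, hedged sketch of how a ULP might call the new drain API during connection teardown follows; the ordering and the helper name my_teardown_qp() are assumptions about a typical consumer, not something this patch mandates.

#include <rdma/ib_verbs.h>

/*
 * Hedged sketch of a typical teardown path.  Assumes the CQs were created
 * with ib_alloc_cq() using a poll context other than IB_POLL_DIRECT and
 * that no other context is still posting WRs, per the requirements above.
 */
static void my_teardown_qp(struct ib_qp *qp, struct ib_cq *cq)
{
	/* In the generic path this moves the QP to the error state and
	 * blocks until the drain WRs posted to the SQ and RQ complete.
	 */
	ib_drain_qp(qp);

	ib_destroy_qp(qp);
	ib_free_cq(cq);
}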
index aded2a5cc2d5d677cabe42707704119e68116c5d..c7ad0a4c8b1504e15fd165392119172a2b342c0a 100644 (file)
@@ -2,6 +2,7 @@ obj-$(CONFIG_INFINIBAND_MTHCA)          += mthca/
 obj-$(CONFIG_INFINIBAND_QIB)           += qib/
 obj-$(CONFIG_INFINIBAND_CXGB3)         += cxgb3/
 obj-$(CONFIG_INFINIBAND_CXGB4)         += cxgb4/
+obj-$(CONFIG_INFINIBAND_I40IW)         += i40iw/
 obj-$(CONFIG_MLX4_INFINIBAND)          += mlx4/
 obj-$(CONFIG_MLX5_INFINIBAND)          += mlx5/
 obj-$(CONFIG_INFINIBAND_NES)           += nes/
index f504ba73e5dc27200c988f342ee3d6423072ee9e..d403231a4aff8d571de464b6a9d5158ec5edd1d0 100644 (file)
@@ -1877,7 +1877,7 @@ err:
 static int is_loopback_dst(struct iw_cm_id *cm_id)
 {
        struct net_device *dev;
-       struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
+       struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
 
        dev = ip_dev_find(&init_net, raddr->sin_addr.s_addr);
        if (!dev)
@@ -1892,10 +1892,10 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        struct iwch_ep *ep;
        struct rtable *rt;
        int err = 0;
-       struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
-       struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
+       struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+       struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
 
-       if (cm_id->remote_addr.ss_family != PF_INET) {
+       if (cm_id->m_remote_addr.ss_family != PF_INET) {
                err = -ENOSYS;
                goto out;
        }
@@ -1961,9 +1961,9 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 
        state_set(&ep->com, CONNECTING);
        ep->tos = IPTOS_LOWDELAY;
-       memcpy(&ep->com.local_addr, &cm_id->local_addr,
+       memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
               sizeof(ep->com.local_addr));
-       memcpy(&ep->com.remote_addr, &cm_id->remote_addr,
+       memcpy(&ep->com.remote_addr, &cm_id->m_remote_addr,
               sizeof(ep->com.remote_addr));
 
        /* send connect request to rnic */
@@ -1992,7 +1992,7 @@ int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
 
        might_sleep();
 
-       if (cm_id->local_addr.ss_family != PF_INET) {
+       if (cm_id->m_local_addr.ss_family != PF_INET) {
                err = -ENOSYS;
                goto fail1;
        }
@@ -2008,7 +2008,7 @@ int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
        cm_id->add_ref(cm_id);
        ep->com.cm_id = cm_id;
        ep->backlog = backlog;
-       memcpy(&ep->com.local_addr, &cm_id->local_addr,
+       memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
               sizeof(ep->com.local_addr));
 
        /*
index 2734820d291b02387b8e925bb5f09ab6e23418dd..42a7b8952d13241e40b531dc9959f3405f3a62a6 100644 (file)
@@ -657,7 +657,8 @@ err:
        return ERR_PTR(err);
 }
 
-static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
+static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
+                                  struct ib_udata *udata)
 {
        struct iwch_dev *rhp;
        struct iwch_pd *php;
index cd2ff5f9518a2b7143d53821672a5d98b9773b17..651711370d557f1798fdac34e2b57b9f15c1b3cf 100644 (file)
@@ -302,7 +302,7 @@ void _c4iw_free_ep(struct kref *kref)
                if (ep->com.remote_addr.ss_family == AF_INET6) {
                        struct sockaddr_in6 *sin6 =
                                        (struct sockaddr_in6 *)
-                                       &ep->com.mapped_local_addr;
+                                       &ep->com.local_addr;
 
                        cxgb4_clip_release(
                                        ep->com.dev->rdev.lldi.ports[0],
@@ -314,12 +314,6 @@ void _c4iw_free_ep(struct kref *kref)
                dst_release(ep->dst);
                cxgb4_l2t_release(ep->l2t);
        }
-       if (test_bit(RELEASE_MAPINFO, &ep->com.flags)) {
-               print_addr(&ep->com, __func__, "remove_mapinfo/mapping");
-               iwpm_remove_mapinfo(&ep->com.local_addr,
-                                   &ep->com.mapped_local_addr);
-               iwpm_remove_mapping(&ep->com.local_addr, RDMA_NL_C4IW);
-       }
        kfree(ep);
 }
 
@@ -455,7 +449,7 @@ static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
        state_set(&ep->com, DEAD);
        if (ep->com.remote_addr.ss_family == AF_INET6) {
                struct sockaddr_in6 *sin6 =
-                       (struct sockaddr_in6 *)&ep->com.mapped_local_addr;
+                       (struct sockaddr_in6 *)&ep->com.local_addr;
                cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
                                   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
        }
@@ -485,12 +479,19 @@ static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
        unsigned int flowclen = 80;
        struct fw_flowc_wr *flowc;
        int i;
+       u16 vlan = ep->l2t->vlan;
+       int nparams;
+
+       if (vlan == CPL_L2T_VLAN_NONE)
+               nparams = 8;
+       else
+               nparams = 9;
 
        skb = get_skb(skb, flowclen, GFP_KERNEL);
        flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);
 
        flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
-                                          FW_FLOWC_WR_NPARAMS_V(8));
+                                          FW_FLOWC_WR_NPARAMS_V(nparams));
        flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(flowclen,
                                          16)) | FW_WR_FLOWID_V(ep->hwtid));
 
@@ -511,9 +512,17 @@ static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
        flowc->mnemval[6].val = cpu_to_be32(ep->snd_win);
        flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
        flowc->mnemval[7].val = cpu_to_be32(ep->emss);
-       /* Pad WR to 16 byte boundary */
-       flowc->mnemval[8].mnemonic = 0;
-       flowc->mnemval[8].val = 0;
+       if (nparams == 9) {
+               u16 pri;
+
+               pri = (vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+               flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS;
+               flowc->mnemval[8].val = cpu_to_be32(pri);
+       } else {
+               /* Pad WR to 16 byte boundary */
+               flowc->mnemval[8].mnemonic = 0;
+               flowc->mnemval[8].val = 0;
+       }
        for (i = 0; i < 9; i++) {
                flowc->mnemval[i].r4[0] = 0;
                flowc->mnemval[i].r4[1] = 0;
@@ -568,54 +577,6 @@ static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
        return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
 }
 
-/*
- * c4iw_form_pm_msg - Form a port mapper message with mapping info
- */
-static void c4iw_form_pm_msg(struct c4iw_ep *ep,
-                               struct iwpm_sa_data *pm_msg)
-{
-       memcpy(&pm_msg->loc_addr, &ep->com.local_addr,
-               sizeof(ep->com.local_addr));
-       memcpy(&pm_msg->rem_addr, &ep->com.remote_addr,
-               sizeof(ep->com.remote_addr));
-}
-
-/*
- * c4iw_form_reg_msg - Form a port mapper message with dev info
- */
-static void c4iw_form_reg_msg(struct c4iw_dev *dev,
-                               struct iwpm_dev_data *pm_msg)
-{
-       memcpy(pm_msg->dev_name, dev->ibdev.name, IWPM_DEVNAME_SIZE);
-       memcpy(pm_msg->if_name, dev->rdev.lldi.ports[0]->name,
-                               IWPM_IFNAME_SIZE);
-}
-
-static void c4iw_record_pm_msg(struct c4iw_ep *ep,
-                       struct iwpm_sa_data *pm_msg)
-{
-       memcpy(&ep->com.mapped_local_addr, &pm_msg->mapped_loc_addr,
-               sizeof(ep->com.mapped_local_addr));
-       memcpy(&ep->com.mapped_remote_addr, &pm_msg->mapped_rem_addr,
-               sizeof(ep->com.mapped_remote_addr));
-}
-
-static int get_remote_addr(struct c4iw_ep *parent_ep, struct c4iw_ep *child_ep)
-{
-       int ret;
-
-       print_addr(&parent_ep->com, __func__, "get_remote_addr parent_ep ");
-       print_addr(&child_ep->com, __func__, "get_remote_addr child_ep ");
-
-       ret = iwpm_get_remote_info(&parent_ep->com.mapped_local_addr,
-                                  &child_ep->com.mapped_remote_addr,
-                                  &child_ep->com.remote_addr, RDMA_NL_C4IW);
-       if (ret)
-               PDBG("Unable to find remote peer addr info - err %d\n", ret);
-
-       return ret;
-}
-
 static void best_mtu(const unsigned short *mtus, unsigned short mtu,
                     unsigned int *idx, int use_ts, int ipv6)
 {
@@ -645,13 +606,13 @@ static int send_connect(struct c4iw_ep *ep)
        int wscale;
        int win, sizev4, sizev6, wrlen;
        struct sockaddr_in *la = (struct sockaddr_in *)
-                                &ep->com.mapped_local_addr;
+                                &ep->com.local_addr;
        struct sockaddr_in *ra = (struct sockaddr_in *)
-                                &ep->com.mapped_remote_addr;
+                                &ep->com.remote_addr;
        struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)
-                                  &ep->com.mapped_local_addr;
+                                  &ep->com.local_addr;
        struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)
-                                  &ep->com.mapped_remote_addr;
+                                  &ep->com.remote_addr;
        int ret;
        enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
        u32 isn = (prandom_u32() & ~7UL) - 1;
@@ -710,7 +671,7 @@ static int send_connect(struct c4iw_ep *ep)
               L2T_IDX_V(ep->l2t->idx) |
               TX_CHAN_V(ep->tx_chan) |
               SMAC_SEL_V(ep->smac_idx) |
-              DSCP_V(ep->tos) |
+              DSCP_V(ep->tos >> 2) |
               ULP_MODE_V(ULP_MODE_TCPDDP) |
               RCV_BUFSIZ_V(win);
        opt2 = RX_CHANNEL_V(0) |
@@ -1829,10 +1790,10 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
        req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
                                     ep->com.dev->rdev.lldi.ports[0],
                                     ep->l2t));
-       sin = (struct sockaddr_in *)&ep->com.mapped_local_addr;
+       sin = (struct sockaddr_in *)&ep->com.local_addr;
        req->le.lport = sin->sin_port;
        req->le.u.ipv4.lip = sin->sin_addr.s_addr;
-       sin = (struct sockaddr_in *)&ep->com.mapped_remote_addr;
+       sin = (struct sockaddr_in *)&ep->com.remote_addr;
        req->le.pport = sin->sin_port;
        req->le.u.ipv4.pip = sin->sin_addr.s_addr;
        req->tcb.t_state_to_astid =
@@ -1864,7 +1825,7 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
                L2T_IDX_V(ep->l2t->idx) |
                TX_CHAN_V(ep->tx_chan) |
                SMAC_SEL_V(ep->smac_idx) |
-               DSCP_V(ep->tos) |
+               DSCP_V(ep->tos >> 2) |
                ULP_MODE_V(ULP_MODE_TCPDDP) |
                RCV_BUFSIZ_V(win));
        req->tcb.opt2 = (__force __be32) (PACE_V(1) |
@@ -1928,7 +1889,7 @@ static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi)
 
 static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
                     struct dst_entry *dst, struct c4iw_dev *cdev,
-                    bool clear_mpa_v1, enum chip_type adapter_type)
+                    bool clear_mpa_v1, enum chip_type adapter_type, u8 tos)
 {
        struct neighbour *n;
        int err, step;
@@ -1958,7 +1919,7 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
                        goto out;
                }
                ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
-                                       n, pdev, 0);
+                                       n, pdev, rt_tos2priority(tos));
                if (!ep->l2t)
                        goto out;
                ep->mtu = pdev->mtu;
@@ -2013,13 +1974,13 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
 {
        int err = 0;
        struct sockaddr_in *laddr = (struct sockaddr_in *)
-                                   &ep->com.cm_id->local_addr;
+                                   &ep->com.cm_id->m_local_addr;
        struct sockaddr_in *raddr = (struct sockaddr_in *)
-                                   &ep->com.cm_id->remote_addr;
+                                   &ep->com.cm_id->m_remote_addr;
        struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)
-                                     &ep->com.cm_id->local_addr;
+                                     &ep->com.cm_id->m_local_addr;
        struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)
-                                     &ep->com.cm_id->remote_addr;
+                                     &ep->com.cm_id->m_remote_addr;
        int iptype;
        __u8 *ra;
 
@@ -2038,10 +1999,10 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
        insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid);
 
        /* find a route */
-       if (ep->com.cm_id->local_addr.ss_family == AF_INET) {
+       if (ep->com.cm_id->m_local_addr.ss_family == AF_INET) {
                ep->dst = find_route(ep->com.dev, laddr->sin_addr.s_addr,
                                     raddr->sin_addr.s_addr, laddr->sin_port,
-                                    raddr->sin_port, 0);
+                                    raddr->sin_port, ep->com.cm_id->tos);
                iptype = 4;
                ra = (__u8 *)&raddr->sin_addr;
        } else {
@@ -2058,7 +2019,8 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
                goto fail3;
        }
        err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, false,
-                       ep->com.dev->rdev.lldi.adapter_type);
+                       ep->com.dev->rdev.lldi.adapter_type,
+                       ep->com.cm_id->tos);
        if (err) {
                pr_err("%s - cannot alloc l2e.\n", __func__);
                goto fail4;
@@ -2069,7 +2031,7 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
             ep->l2t->idx);
 
        state_set(&ep->com, CONNECTING);
-       ep->tos = 0;
+       ep->tos = ep->com.cm_id->tos;
 
        /* send connect request to rnic */
        err = send_connect(ep);
@@ -2109,10 +2071,10 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
        struct sockaddr_in6 *ra6;
 
        ep = lookup_atid(t, atid);
-       la = (struct sockaddr_in *)&ep->com.mapped_local_addr;
-       ra = (struct sockaddr_in *)&ep->com.mapped_remote_addr;
-       la6 = (struct sockaddr_in6 *)&ep->com.mapped_local_addr;
-       ra6 = (struct sockaddr_in6 *)&ep->com.mapped_remote_addr;
+       la = (struct sockaddr_in *)&ep->com.local_addr;
+       ra = (struct sockaddr_in *)&ep->com.remote_addr;
+       la6 = (struct sockaddr_in6 *)&ep->com.local_addr;
+       ra6 = (struct sockaddr_in6 *)&ep->com.remote_addr;
 
        PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
             status, status2errno(status));
@@ -2154,7 +2116,7 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
                        if (ep->com.remote_addr.ss_family == AF_INET6) {
                                struct sockaddr_in6 *sin6 =
                                                (struct sockaddr_in6 *)
-                                               &ep->com.mapped_local_addr;
+                                               &ep->com.local_addr;
                                cxgb4_clip_release(
                                                ep->com.dev->rdev.lldi.ports[0],
                                                (const u32 *)
@@ -2189,7 +2151,7 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 
        if (ep->com.remote_addr.ss_family == AF_INET6) {
                struct sockaddr_in6 *sin6 =
-                       (struct sockaddr_in6 *)&ep->com.mapped_local_addr;
+                       (struct sockaddr_in6 *)&ep->com.local_addr;
                cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
                                   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
        }
@@ -2391,6 +2353,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
        u16 peer_mss = ntohs(req->tcpopt.mss);
        int iptype;
        unsigned short hdrs;
+       u8 tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
 
        parent_ep = lookup_stid(t, stid);
        if (!parent_ep) {
@@ -2399,8 +2362,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
        }
 
        if (state_read(&parent_ep->com) != LISTEN) {
-               printk(KERN_ERR "%s - listening ep not in LISTEN\n",
-                      __func__);
+               PDBG("%s - listening ep not in LISTEN\n", __func__);
                goto reject;
        }
 
@@ -2415,7 +2377,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
                     ntohs(peer_port), peer_mss);
                dst = find_route(dev, *(__be32 *)local_ip, *(__be32 *)peer_ip,
                                 local_port, peer_port,
-                                PASS_OPEN_TOS_G(ntohl(req->tos_stid)));
+                                tos);
        } else {
                PDBG("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
                     , __func__, parent_ep, hwtid,
@@ -2441,7 +2403,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
        }
 
        err = import_ep(child_ep, iptype, peer_ip, dst, dev, false,
-                       parent_ep->com.dev->rdev.lldi.adapter_type);
+                       parent_ep->com.dev->rdev.lldi.adapter_type, tos);
        if (err) {
                printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
                       __func__);
@@ -2459,18 +2421,9 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
        child_ep->com.dev = dev;
        child_ep->com.cm_id = NULL;
 
-       /*
-        * The mapped_local and mapped_remote addresses get setup with
-        * the actual 4-tuple.  The local address will be based on the
-        * actual local address of the connection, but on the port number
-        * of the parent listening endpoint.  The remote address is
-        * setup based on a query to the IWPM since we don't know what it
-        * originally was before mapping.  If no mapping was done, then
-        * mapped_remote == remote, and mapped_local == local.
-        */
        if (iptype == 4) {
                struct sockaddr_in *sin = (struct sockaddr_in *)
-                       &child_ep->com.mapped_local_addr;
+                       &child_ep->com.local_addr;
 
                sin->sin_family = PF_INET;
                sin->sin_port = local_port;
@@ -2482,12 +2435,12 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
                                 &parent_ep->com.local_addr)->sin_port;
                sin->sin_addr.s_addr = *(__be32 *)local_ip;
 
-               sin = (struct sockaddr_in *)&child_ep->com.mapped_remote_addr;
+               sin = (struct sockaddr_in *)&child_ep->com.remote_addr;
                sin->sin_family = PF_INET;
                sin->sin_port = peer_port;
                sin->sin_addr.s_addr = *(__be32 *)peer_ip;
        } else {
-               sin6 = (struct sockaddr_in6 *)&child_ep->com.mapped_local_addr;
+               sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
                sin6->sin6_family = PF_INET6;
                sin6->sin6_port = local_port;
                memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
@@ -2498,18 +2451,15 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
                                   &parent_ep->com.local_addr)->sin6_port;
                memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
 
-               sin6 = (struct sockaddr_in6 *)&child_ep->com.mapped_remote_addr;
+               sin6 = (struct sockaddr_in6 *)&child_ep->com.remote_addr;
                sin6->sin6_family = PF_INET6;
                sin6->sin6_port = peer_port;
                memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
        }
-       memcpy(&child_ep->com.remote_addr, &child_ep->com.mapped_remote_addr,
-              sizeof(child_ep->com.remote_addr));
-       get_remote_addr(parent_ep, child_ep);
 
        c4iw_get_ep(&parent_ep->com);
        child_ep->parent_ep = parent_ep;
-       child_ep->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
+       child_ep->tos = tos;
        child_ep->dst = dst;
        child_ep->hwtid = hwtid;
 
@@ -2522,7 +2472,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
        accept_cr(child_ep, skb, req);
        set_bit(PASS_ACCEPT_REQ, &child_ep->com.history);
        if (iptype == 6) {
-               sin6 = (struct sockaddr_in6 *)&child_ep->com.mapped_local_addr;
+               sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
                cxgb4_clip_get(child_ep->com.dev->rdev.lldi.ports[0],
                               (const u32 *)&sin6->sin6_addr.s6_addr, 1);
        }
@@ -2765,7 +2715,7 @@ out:
                if (ep->com.remote_addr.ss_family == AF_INET6) {
                        struct sockaddr_in6 *sin6 =
                                        (struct sockaddr_in6 *)
-                                       &ep->com.mapped_local_addr;
+                                       &ep->com.local_addr;
                        cxgb4_clip_release(
                                        ep->com.dev->rdev.lldi.ports[0],
                                        (const u32 *)&sin6->sin6_addr.s6_addr,
@@ -3026,8 +2976,8 @@ static int pick_local_ipaddrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
 {
        struct in_device *ind;
        int found = 0;
-       struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
-       struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
+       struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+       struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
 
        ind = in_dev_get(dev->rdev.lldi.ports[0]);
        if (!ind)
@@ -3072,8 +3022,8 @@ static int get_lladdr(struct net_device *dev, struct in6_addr *addr,
 static int pick_local_ip6addrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
 {
        struct in6_addr uninitialized_var(addr);
-       struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->local_addr;
-       struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->remote_addr;
+       struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
+       struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;
 
        if (!get_lladdr(dev->rdev.lldi.ports[0], &addr, IFA_F_TENTATIVE)) {
                memcpy(la6->sin6_addr.s6_addr, &addr, 16);
@@ -3092,11 +3042,8 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        struct sockaddr_in *raddr;
        struct sockaddr_in6 *laddr6;
        struct sockaddr_in6 *raddr6;
-       struct iwpm_dev_data pm_reg_msg;
-       struct iwpm_sa_data pm_msg;
        __u8 *ra;
        int iptype;
-       int iwpm_err = 0;
 
        if ((conn_param->ord > cur_max_read_depth(dev)) ||
            (conn_param->ird > cur_max_read_depth(dev))) {
@@ -3144,47 +3091,17 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        }
        insert_handle(dev, &dev->atid_idr, ep, ep->atid);
 
-       memcpy(&ep->com.local_addr, &cm_id->local_addr,
+       memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
               sizeof(ep->com.local_addr));
-       memcpy(&ep->com.remote_addr, &cm_id->remote_addr,
+       memcpy(&ep->com.remote_addr, &cm_id->m_remote_addr,
               sizeof(ep->com.remote_addr));
 
-       /* No port mapper available, go with the specified peer information */
-       memcpy(&ep->com.mapped_local_addr, &cm_id->local_addr,
-              sizeof(ep->com.mapped_local_addr));
-       memcpy(&ep->com.mapped_remote_addr, &cm_id->remote_addr,
-              sizeof(ep->com.mapped_remote_addr));
-
-       c4iw_form_reg_msg(dev, &pm_reg_msg);
-       iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_C4IW);
-       if (iwpm_err) {
-               PDBG("%s: Port Mapper reg pid fail (err = %d).\n",
-                       __func__, iwpm_err);
-       }
-       if (iwpm_valid_pid() && !iwpm_err) {
-               c4iw_form_pm_msg(ep, &pm_msg);
-               iwpm_err = iwpm_add_and_query_mapping(&pm_msg, RDMA_NL_C4IW);
-               if (iwpm_err)
-                       PDBG("%s: Port Mapper query fail (err = %d).\n",
-                               __func__, iwpm_err);
-               else
-                       c4iw_record_pm_msg(ep, &pm_msg);
-       }
-       if (iwpm_create_mapinfo(&ep->com.local_addr,
-                               &ep->com.mapped_local_addr, RDMA_NL_C4IW)) {
-               iwpm_remove_mapping(&ep->com.local_addr, RDMA_NL_C4IW);
-               err = -ENOMEM;
-               goto fail1;
-       }
-       print_addr(&ep->com, __func__, "add_query/create_mapinfo");
-       set_bit(RELEASE_MAPINFO, &ep->com.flags);
-
-       laddr = (struct sockaddr_in *)&ep->com.mapped_local_addr;
-       raddr = (struct sockaddr_in *)&ep->com.mapped_remote_addr;
-       laddr6 = (struct sockaddr_in6 *)&ep->com.mapped_local_addr;
-       raddr6 = (struct sockaddr_in6 *) &ep->com.mapped_remote_addr;
+       laddr = (struct sockaddr_in *)&ep->com.local_addr;
+       raddr = (struct sockaddr_in *)&ep->com.remote_addr;
+       laddr6 = (struct sockaddr_in6 *)&ep->com.local_addr;
+       raddr6 = (struct sockaddr_in6 *) &ep->com.remote_addr;
 
-       if (cm_id->remote_addr.ss_family == AF_INET) {
+       if (cm_id->m_remote_addr.ss_family == AF_INET) {
                iptype = 4;
                ra = (__u8 *)&raddr->sin_addr;
 
@@ -3203,7 +3120,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
                     ra, ntohs(raddr->sin_port));
                ep->dst = find_route(dev, laddr->sin_addr.s_addr,
                                     raddr->sin_addr.s_addr, laddr->sin_port,
-                                    raddr->sin_port, 0);
+                                    raddr->sin_port, cm_id->tos);
        } else {
                iptype = 6;
                ra = (__u8 *)&raddr6->sin6_addr;
@@ -3234,7 +3151,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        }
 
        err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true,
-                       ep->com.dev->rdev.lldi.adapter_type);
+                       ep->com.dev->rdev.lldi.adapter_type, cm_id->tos);
        if (err) {
                printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
                goto fail3;
@@ -3245,7 +3162,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
                ep->l2t->idx);
 
        state_set(&ep->com, CONNECTING);
-       ep->tos = 0;
+       ep->tos = cm_id->tos;
 
        /* send connect request to rnic */
        err = send_connect(ep);
@@ -3269,7 +3186,7 @@ static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
 {
        int err;
        struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
-                                   &ep->com.mapped_local_addr;
+                                   &ep->com.local_addr;
 
        if (ipv6_addr_type(&sin6->sin6_addr) != IPV6_ADDR_ANY) {
                err = cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0],
@@ -3302,7 +3219,7 @@ static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
 {
        int err;
        struct sockaddr_in *sin = (struct sockaddr_in *)
-                                 &ep->com.mapped_local_addr;
+                                 &ep->com.local_addr;
 
        if (dev->rdev.lldi.enable_fw_ofld_conn) {
                do {
@@ -3343,9 +3260,6 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
        int err = 0;
        struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
        struct c4iw_listen_ep *ep;
-       struct iwpm_dev_data pm_reg_msg;
-       struct iwpm_sa_data pm_msg;
-       int iwpm_err = 0;
 
        might_sleep();
 
@@ -3360,7 +3274,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
        ep->com.cm_id = cm_id;
        ep->com.dev = dev;
        ep->backlog = backlog;
-       memcpy(&ep->com.local_addr, &cm_id->local_addr,
+       memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
               sizeof(ep->com.local_addr));
 
        /*
@@ -3369,10 +3283,10 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
        if (dev->rdev.lldi.enable_fw_ofld_conn &&
            ep->com.local_addr.ss_family == AF_INET)
                ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids,
-                                            cm_id->local_addr.ss_family, ep);
+                                            cm_id->m_local_addr.ss_family, ep);
        else
                ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids,
-                                           cm_id->local_addr.ss_family, ep);
+                                           cm_id->m_local_addr.ss_family, ep);
 
        if (ep->stid == -1) {
                printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
@@ -3381,36 +3295,9 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
        }
        insert_handle(dev, &dev->stid_idr, ep, ep->stid);
 
-       /* No port mapper available, go with the specified info */
-       memcpy(&ep->com.mapped_local_addr, &cm_id->local_addr,
-              sizeof(ep->com.mapped_local_addr));
-
-       c4iw_form_reg_msg(dev, &pm_reg_msg);
-       iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_C4IW);
-       if (iwpm_err) {
-               PDBG("%s: Port Mapper reg pid fail (err = %d).\n",
-                       __func__, iwpm_err);
-       }
-       if (iwpm_valid_pid() && !iwpm_err) {
-               memcpy(&pm_msg.loc_addr, &ep->com.local_addr,
-                               sizeof(ep->com.local_addr));
-               iwpm_err = iwpm_add_mapping(&pm_msg, RDMA_NL_C4IW);
-               if (iwpm_err)
-                       PDBG("%s: Port Mapper query fail (err = %d).\n",
-                               __func__, iwpm_err);
-               else
-                       memcpy(&ep->com.mapped_local_addr,
-                               &pm_msg.mapped_loc_addr,
-                               sizeof(ep->com.mapped_local_addr));
-       }
-       if (iwpm_create_mapinfo(&ep->com.local_addr,
-                               &ep->com.mapped_local_addr, RDMA_NL_C4IW)) {
-               err = -ENOMEM;
-               goto fail3;
-       }
-       print_addr(&ep->com, __func__, "add_mapping/create_mapinfo");
+       memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
+              sizeof(ep->com.local_addr));
 
-       set_bit(RELEASE_MAPINFO, &ep->com.flags);
        state_set(&ep->com, LISTEN);
        if (ep->com.local_addr.ss_family == AF_INET)
                err = create_server4(dev, ep);
@@ -3421,7 +3308,6 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
                goto out;
        }
 
-fail3:
        cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
                        ep->com.local_addr.ss_family);
 fail2:
@@ -3456,7 +3342,7 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
                        goto done;
                err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait,
                                          0, 0, __func__);
-               sin6 = (struct sockaddr_in6 *)&ep->com.mapped_local_addr;
+               sin6 = (struct sockaddr_in6 *)&ep->com.local_addr;
                cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
                                   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
        }
@@ -3580,7 +3466,7 @@ static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
        state_set(&ep->com, DEAD);
        if (ep->com.remote_addr.ss_family == AF_INET6) {
                struct sockaddr_in6 *sin6 =
-                       (struct sockaddr_in6 *)&ep->com.mapped_local_addr;
+                       (struct sockaddr_in6 *)&ep->com.local_addr;
                cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
                                   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
        }
index cf21df4a8bf5b80da00f6f82003980650df9c522..b4eeb783573c88e50904ff20668fdcd10ee86e4c 100644 (file)
@@ -815,8 +815,15 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
                }
        }
 out:
-       if (wq)
+       if (wq) {
+               if (unlikely(qhp->attr.state != C4IW_QP_STATE_RTS)) {
+                       if (t4_sq_empty(wq))
+                               complete(&qhp->sq_drained);
+                       if (t4_rq_empty(wq))
+                               complete(&qhp->rq_drained);
+               }
                spin_unlock(&qhp->lock);
+       }
        return ret;
 }
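The hunk above completes the new sq_drained/rq_drained completions whenever c4iw_poll_cq_one() sees an empty queue on a QP that is no longer in RTS. A plausible sketch of the matching c4iw_drain_sq()/c4iw_drain_rq() hooks, declared in the iw_cxgb4 header below and wired up in c4iw_register_device(), is shown here; the real bodies live in qp.c and are not part of this excerpt, so treat them as an assumption.

/*
 * Hedged sketch: provider drain hooks built on the completions that are
 * initialized in c4iw_create_qp() and completed in c4iw_poll_cq_one().
 * The in-tree implementation may differ in detail.
 */
void c4iw_drain_sq(struct ib_qp *ibqp)
{
	struct c4iw_qp *qp = to_c4iw_qp(ibqp);

	wait_for_completion(&qp->sq_drained);
}

void c4iw_drain_rq(struct ib_qp *ibqp)
{
	struct c4iw_qp *qp = to_c4iw_qp(ibqp);

	wait_for_completion(&qp->rq_drained);
}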
 
index 8024ea4417b8735b93e77dc9310314db44b8833e..ae2e8b23d2dde589059b2c0a9ebdd077e17a3668 100644 (file)
@@ -87,17 +87,6 @@ struct c4iw_debugfs_data {
        int pos;
 };
 
-/* registered cxgb4 netlink callbacks */
-static struct ibnl_client_cbs c4iw_nl_cb_table[] = {
-       [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
-       [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
-       [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
-       [RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
-       [RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb},
-       [RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
-       [RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb}
-};
-
 static int count_idrs(int id, void *p, void *data)
 {
        int *countp = data;
@@ -242,13 +231,13 @@ static int dump_qp(int id, void *p, void *data)
        if (qp->ep) {
                if (qp->ep->com.local_addr.ss_family == AF_INET) {
                        struct sockaddr_in *lsin = (struct sockaddr_in *)
-                               &qp->ep->com.local_addr;
+                               &qp->ep->com.cm_id->local_addr;
                        struct sockaddr_in *rsin = (struct sockaddr_in *)
-                               &qp->ep->com.remote_addr;
+                               &qp->ep->com.cm_id->remote_addr;
                        struct sockaddr_in *mapped_lsin = (struct sockaddr_in *)
-                               &qp->ep->com.mapped_local_addr;
+                               &qp->ep->com.cm_id->m_local_addr;
                        struct sockaddr_in *mapped_rsin = (struct sockaddr_in *)
-                               &qp->ep->com.mapped_remote_addr;
+                               &qp->ep->com.cm_id->m_remote_addr;
 
                        cc = snprintf(qpd->buf + qpd->pos, space,
                                      "rc qp sq id %u rq id %u state %u "
@@ -264,15 +253,15 @@ static int dump_qp(int id, void *p, void *data)
                                      ntohs(mapped_rsin->sin_port));
                } else {
                        struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
-                               &qp->ep->com.local_addr;
+                               &qp->ep->com.cm_id->local_addr;
                        struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *)
-                               &qp->ep->com.remote_addr;
+                               &qp->ep->com.cm_id->remote_addr;
                        struct sockaddr_in6 *mapped_lsin6 =
                                (struct sockaddr_in6 *)
-                               &qp->ep->com.mapped_local_addr;
+                               &qp->ep->com.cm_id->m_local_addr;
                        struct sockaddr_in6 *mapped_rsin6 =
                                (struct sockaddr_in6 *)
-                               &qp->ep->com.mapped_remote_addr;
+                               &qp->ep->com.cm_id->m_remote_addr;
 
                        cc = snprintf(qpd->buf + qpd->pos, space,
                                      "rc qp sq id %u rq id %u state %u "
@@ -545,13 +534,13 @@ static int dump_ep(int id, void *p, void *data)
 
        if (ep->com.local_addr.ss_family == AF_INET) {
                struct sockaddr_in *lsin = (struct sockaddr_in *)
-                       &ep->com.local_addr;
+                       &ep->com.cm_id->local_addr;
                struct sockaddr_in *rsin = (struct sockaddr_in *)
-                       &ep->com.remote_addr;
+                       &ep->com.cm_id->remote_addr;
                struct sockaddr_in *mapped_lsin = (struct sockaddr_in *)
-                       &ep->com.mapped_local_addr;
+                       &ep->com.cm_id->m_local_addr;
                struct sockaddr_in *mapped_rsin = (struct sockaddr_in *)
-                       &ep->com.mapped_remote_addr;
+                       &ep->com.cm_id->m_remote_addr;
 
                cc = snprintf(epd->buf + epd->pos, space,
                              "ep %p cm_id %p qp %p state %d flags 0x%lx "
@@ -569,13 +558,13 @@ static int dump_ep(int id, void *p, void *data)
                              ntohs(mapped_rsin->sin_port));
        } else {
                struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
-                       &ep->com.local_addr;
+                       &ep->com.cm_id->local_addr;
                struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *)
-                       &ep->com.remote_addr;
+                       &ep->com.cm_id->remote_addr;
                struct sockaddr_in6 *mapped_lsin6 = (struct sockaddr_in6 *)
-                       &ep->com.mapped_local_addr;
+                       &ep->com.cm_id->m_local_addr;
                struct sockaddr_in6 *mapped_rsin6 = (struct sockaddr_in6 *)
-                       &ep->com.mapped_remote_addr;
+                       &ep->com.cm_id->m_remote_addr;
 
                cc = snprintf(epd->buf + epd->pos, space,
                              "ep %p cm_id %p qp %p state %d flags 0x%lx "
@@ -610,9 +599,9 @@ static int dump_listen_ep(int id, void *p, void *data)
 
        if (ep->com.local_addr.ss_family == AF_INET) {
                struct sockaddr_in *lsin = (struct sockaddr_in *)
-                       &ep->com.local_addr;
+                       &ep->com.cm_id->local_addr;
                struct sockaddr_in *mapped_lsin = (struct sockaddr_in *)
-                       &ep->com.mapped_local_addr;
+                       &ep->com.cm_id->m_local_addr;
 
                cc = snprintf(epd->buf + epd->pos, space,
                              "ep %p cm_id %p state %d flags 0x%lx stid %d "
@@ -623,9 +612,9 @@ static int dump_listen_ep(int id, void *p, void *data)
                              ntohs(mapped_lsin->sin_port));
        } else {
                struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
-                       &ep->com.local_addr;
+                       &ep->com.cm_id->local_addr;
                struct sockaddr_in6 *mapped_lsin6 = (struct sockaddr_in6 *)
-                       &ep->com.mapped_local_addr;
+                       &ep->com.cm_id->m_local_addr;
 
                cc = snprintf(epd->buf + epd->pos, space,
                              "ep %p cm_id %p state %d flags 0x%lx stid %d "
@@ -801,10 +790,9 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
             rdev->lldi.vr->qp.size,
             rdev->lldi.vr->cq.start,
             rdev->lldi.vr->cq.size);
-       PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p "
+       PDBG("udb %pR db_reg %p gts_reg %p "
             "qpmask 0x%x cqmask 0x%x\n",
-            (unsigned)pci_resource_len(rdev->lldi.pdev, 2),
-            (void *)pci_resource_start(rdev->lldi.pdev, 2),
+               &rdev->lldi.pdev->resource[2],
             rdev->lldi.db_reg, rdev->lldi.gts_reg,
             rdev->qpmask, rdev->cqmask);
 
@@ -1506,20 +1494,6 @@ static int __init c4iw_init_module(void)
                printk(KERN_WARNING MOD
                       "could not create debugfs entry, continuing\n");
 
-       if (ibnl_add_client(RDMA_NL_C4IW, RDMA_NL_IWPM_NUM_OPS,
-                           c4iw_nl_cb_table))
-               pr_err("%s[%u]: Failed to add netlink callback\n"
-                      , __func__, __LINE__);
-
-       err = iwpm_init(RDMA_NL_C4IW);
-       if (err) {
-               pr_err("port mapper initialization failed with %d\n", err);
-               ibnl_remove_client(RDMA_NL_C4IW);
-               c4iw_cm_term();
-               debugfs_remove_recursive(c4iw_debugfs_root);
-               return err;
-       }
-
        cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);
 
        return 0;
@@ -1537,8 +1511,6 @@ static void __exit c4iw_exit_module(void)
        }
        mutex_unlock(&dev_mutex);
        cxgb4_unregister_uld(CXGB4_ULD_RDMA);
-       iwpm_exit(RDMA_NL_C4IW);
-       ibnl_remove_client(RDMA_NL_C4IW);
        c4iw_cm_term();
        debugfs_remove_recursive(c4iw_debugfs_root);
 }
index fb2de75a039216e2f732a8f61ba387de121f1ce7..df43f871ab61cba60d44986cc4ac17c7881a3777 100644 (file)
@@ -476,6 +476,8 @@ struct c4iw_qp {
        wait_queue_head_t wait;
        struct timer_list timer;
        int sq_sig_all;
+       struct completion rq_drained;
+       struct completion sq_drained;
 };
 
 static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
@@ -753,7 +755,6 @@ enum c4iw_ep_flags {
        CLOSE_SENT              = 3,
        TIMEOUT                 = 4,
        QP_REFERENCED           = 5,
-       RELEASE_MAPINFO         = 6,
 };
 
 enum c4iw_ep_history {
@@ -790,8 +791,6 @@ struct c4iw_ep_common {
        struct mutex mutex;
        struct sockaddr_storage local_addr;
        struct sockaddr_storage remote_addr;
-       struct sockaddr_storage mapped_local_addr;
-       struct sockaddr_storage mapped_remote_addr;
        struct c4iw_wr_wait wr_wait;
        unsigned long flags;
        unsigned long history;
@@ -843,45 +842,6 @@ struct c4iw_ep {
        struct c4iw_ep_stats stats;
 };
 
-static inline void print_addr(struct c4iw_ep_common *epc, const char *func,
-                             const char *msg)
-{
-
-#define SINA(a) (&(((struct sockaddr_in *)(a))->sin_addr.s_addr))
-#define SINP(a) ntohs(((struct sockaddr_in *)(a))->sin_port)
-#define SIN6A(a) (&(((struct sockaddr_in6 *)(a))->sin6_addr))
-#define SIN6P(a) ntohs(((struct sockaddr_in6 *)(a))->sin6_port)
-
-       if (c4iw_debug) {
-               switch (epc->local_addr.ss_family) {
-               case AF_INET:
-                       PDBG("%s %s %pI4:%u/%u <-> %pI4:%u/%u\n",
-                            func, msg, SINA(&epc->local_addr),
-                            SINP(&epc->local_addr),
-                            SINP(&epc->mapped_local_addr),
-                            SINA(&epc->remote_addr),
-                            SINP(&epc->remote_addr),
-                            SINP(&epc->mapped_remote_addr));
-                       break;
-               case AF_INET6:
-                       PDBG("%s %s %pI6:%u/%u <-> %pI6:%u/%u\n",
-                            func, msg, SIN6A(&epc->local_addr),
-                            SIN6P(&epc->local_addr),
-                            SIN6P(&epc->mapped_local_addr),
-                            SIN6A(&epc->remote_addr),
-                            SIN6P(&epc->remote_addr),
-                            SIN6P(&epc->mapped_remote_addr));
-                       break;
-               default:
-                       break;
-               }
-       }
-#undef SINA
-#undef SINP
-#undef SIN6A
-#undef SIN6P
-}
-
 static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
 {
        return cm_id->provider_data;
@@ -961,7 +921,8 @@ int c4iw_map_mr_sg(struct ib_mr *ibmr,
                   struct scatterlist *sg,
                   int sg_nents);
 int c4iw_dealloc_mw(struct ib_mw *mw);
-struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
+struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
+                           struct ib_udata *udata);
 struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
                                           u64 length, u64 virt, int acc,
                                           struct ib_udata *udata);
@@ -1016,6 +977,8 @@ extern int c4iw_wr_log;
 extern int db_fc_threshold;
 extern int db_coalescing_threshold;
 extern int use_dsgl;
+void c4iw_drain_rq(struct ib_qp *qp);
+void c4iw_drain_sq(struct ib_qp *qp);
 
 
 #endif
index 7849890c478141e57136127c84f4cad9fd46227d..008be07d560432b7a0954a0e7676e71b2ef61d04 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/moduleparam.h>
 #include <rdma/ib_umem.h>
 #include <linux/atomic.h>
+#include <rdma/ib_user_verbs.h>
 
 #include "iw_cxgb4.h"
 
@@ -552,7 +553,8 @@ err:
        return ERR_PTR(err);
 }
 
-struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
+struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
+                           struct ib_udata *udata)
 {
        struct c4iw_dev *rhp;
        struct c4iw_pd *php;
@@ -617,12 +619,14 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
        int ret = 0;
        int length = roundup(max_num_sg * sizeof(u64), 32);
 
+       php = to_c4iw_pd(pd);
+       rhp = php->rhp;
+
        if (mr_type != IB_MR_TYPE_MEM_REG ||
-           max_num_sg > t4_max_fr_depth(use_dsgl))
+           max_num_sg > t4_max_fr_depth(rhp->rdev.lldi.ulptx_memwrite_dsgl &&
+                                        use_dsgl))
                return ERR_PTR(-EINVAL);
 
-       php = to_c4iw_pd(pd);
-       rhp = php->rhp;
        mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
        if (!mhp) {
                ret = -ENOMEM;
index ec04272fbdc2ffbf882318678332a506c1f1d4ec..124682dc57094cfb3970167bf2172863e4f7e0f5 100644 (file)
@@ -339,7 +339,8 @@ static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *pro
        props->max_mr = c4iw_num_stags(&dev->rdev);
        props->max_pd = T4_MAX_NUM_PD;
        props->local_ca_ack_delay = 0;
-       props->max_fast_reg_page_list_len = t4_max_fr_depth(use_dsgl);
+       props->max_fast_reg_page_list_len =
+               t4_max_fr_depth(dev->rdev.lldi.ulptx_memwrite_dsgl && use_dsgl);
 
        return 0;
 }
@@ -564,6 +565,8 @@ int c4iw_register_device(struct c4iw_dev *dev)
        dev->ibdev.get_protocol_stats = c4iw_get_mib;
        dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
        dev->ibdev.get_port_immutable = c4iw_port_immutable;
+       dev->ibdev.drain_sq = c4iw_drain_sq;
+       dev->ibdev.drain_rq = c4iw_drain_rq;
 
        dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
        if (!dev->ibdev.iwcm)
index e99345eb875aa286a8b962696eba9742189e660c..e17fb5d5e0339ac2e29541e44c145a51d0d19d2f 100644 (file)
@@ -606,7 +606,7 @@ static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
 }
 
 static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
-                       struct ib_reg_wr *wr, u8 *len16, u8 t5dev)
+                       struct ib_reg_wr *wr, u8 *len16, bool dsgl_supported)
 {
        struct c4iw_mr *mhp = to_c4iw_mr(wr->mr);
        struct fw_ri_immd *imdp;
@@ -615,7 +615,7 @@ static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
        int pbllen = roundup(mhp->mpl_len * sizeof(u64), 32);
        int rem;
 
-       if (mhp->mpl_len > t4_max_fr_depth(use_dsgl))
+       if (mhp->mpl_len > t4_max_fr_depth(dsgl_supported && use_dsgl))
                return -EINVAL;
 
        wqe->fr.qpbinde_to_dcacpu = 0;
@@ -629,7 +629,7 @@ static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
        wqe->fr.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova &
                                        0xffffffff);
 
-       if (t5dev && use_dsgl && (pbllen > max_fr_immd)) {
+       if (dsgl_supported && use_dsgl && (pbllen > max_fr_immd)) {
                struct fw_ri_dsgl *sglp;
 
                for (i = 0; i < mhp->mpl_len; i++)
@@ -808,9 +808,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                        fw_opcode = FW_RI_FR_NSMR_WR;
                        swsqe->opcode = FW_RI_FAST_REGISTER;
                        err = build_memreg(&qhp->wq.sq, wqe, reg_wr(wr), &len16,
-                                          is_t5(
-                                          qhp->rhp->rdev.lldi.adapter_type) ?
-                                          1 : 0);
+                               qhp->rhp->rdev.lldi.ulptx_memwrite_dsgl);
                        break;
                case IB_WR_LOCAL_INV:
                        if (wr->send_flags & IB_SEND_FENCE)
@@ -1621,7 +1619,8 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
        unsigned int sqsize, rqsize;
        struct c4iw_ucontext *ucontext;
        int ret;
-       struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4, *mm5 = NULL;
+       struct c4iw_mm_entry *sq_key_mm, *rq_key_mm = NULL, *sq_db_key_mm;
+       struct c4iw_mm_entry *rq_db_key_mm = NULL, *ma_sync_key_mm = NULL;
 
        PDBG("%s ib_pd %p\n", __func__, pd);
 
@@ -1697,6 +1696,8 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
        qhp->attr.max_ird = 0;
        qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
        spin_lock_init(&qhp->lock);
+       init_completion(&qhp->sq_drained);
+       init_completion(&qhp->rq_drained);
        mutex_init(&qhp->mutex);
        init_waitqueue_head(&qhp->wait);
        atomic_set(&qhp->refcnt, 1);
@@ -1706,29 +1707,30 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
                goto err2;
 
        if (udata) {
-               mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
-               if (!mm1) {
+               sq_key_mm = kmalloc(sizeof(*sq_key_mm), GFP_KERNEL);
+               if (!sq_key_mm) {
                        ret = -ENOMEM;
                        goto err3;
                }
-               mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
-               if (!mm2) {
+               rq_key_mm = kmalloc(sizeof(*rq_key_mm), GFP_KERNEL);
+               if (!rq_key_mm) {
                        ret = -ENOMEM;
                        goto err4;
                }
-               mm3 = kmalloc(sizeof *mm3, GFP_KERNEL);
-               if (!mm3) {
+               sq_db_key_mm = kmalloc(sizeof(*sq_db_key_mm), GFP_KERNEL);
+               if (!sq_db_key_mm) {
                        ret = -ENOMEM;
                        goto err5;
                }
-               mm4 = kmalloc(sizeof *mm4, GFP_KERNEL);
-               if (!mm4) {
+               rq_db_key_mm = kmalloc(sizeof(*rq_db_key_mm), GFP_KERNEL);
+               if (!rq_db_key_mm) {
                        ret = -ENOMEM;
                        goto err6;
                }
                if (t4_sq_onchip(&qhp->wq.sq)) {
-                       mm5 = kmalloc(sizeof *mm5, GFP_KERNEL);
-                       if (!mm5) {
+                       ma_sync_key_mm = kmalloc(sizeof(*ma_sync_key_mm),
+                                                GFP_KERNEL);
+                       if (!ma_sync_key_mm) {
                                ret = -ENOMEM;
                                goto err7;
                        }
@@ -1743,7 +1745,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
                uresp.rq_size = qhp->wq.rq.size;
                uresp.rq_memsize = qhp->wq.rq.memsize;
                spin_lock(&ucontext->mmap_lock);
-               if (mm5) {
+               if (ma_sync_key_mm) {
                        uresp.ma_sync_key = ucontext->key;
                        ucontext->key += PAGE_SIZE;
                } else {
@@ -1761,28 +1763,29 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
                ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
                if (ret)
                        goto err8;
-               mm1->key = uresp.sq_key;
-               mm1->addr = qhp->wq.sq.phys_addr;
-               mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize);
-               insert_mmap(ucontext, mm1);
-               mm2->key = uresp.rq_key;
-               mm2->addr = virt_to_phys(qhp->wq.rq.queue);
-               mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize);
-               insert_mmap(ucontext, mm2);
-               mm3->key = uresp.sq_db_gts_key;
-               mm3->addr = (__force unsigned long)qhp->wq.sq.bar2_pa;
-               mm3->len = PAGE_SIZE;
-               insert_mmap(ucontext, mm3);
-               mm4->key = uresp.rq_db_gts_key;
-               mm4->addr = (__force unsigned long)qhp->wq.rq.bar2_pa;
-               mm4->len = PAGE_SIZE;
-               insert_mmap(ucontext, mm4);
-               if (mm5) {
-                       mm5->key = uresp.ma_sync_key;
-                       mm5->addr = (pci_resource_start(rhp->rdev.lldi.pdev, 0)
-                                   + PCIE_MA_SYNC_A) & PAGE_MASK;
-                       mm5->len = PAGE_SIZE;
-                       insert_mmap(ucontext, mm5);
+               sq_key_mm->key = uresp.sq_key;
+               sq_key_mm->addr = qhp->wq.sq.phys_addr;
+               sq_key_mm->len = PAGE_ALIGN(qhp->wq.sq.memsize);
+               insert_mmap(ucontext, sq_key_mm);
+               rq_key_mm->key = uresp.rq_key;
+               rq_key_mm->addr = virt_to_phys(qhp->wq.rq.queue);
+               rq_key_mm->len = PAGE_ALIGN(qhp->wq.rq.memsize);
+               insert_mmap(ucontext, rq_key_mm);
+               sq_db_key_mm->key = uresp.sq_db_gts_key;
+               sq_db_key_mm->addr = (u64)(unsigned long)qhp->wq.sq.bar2_pa;
+               sq_db_key_mm->len = PAGE_SIZE;
+               insert_mmap(ucontext, sq_db_key_mm);
+               rq_db_key_mm->key = uresp.rq_db_gts_key;
+               rq_db_key_mm->addr = (u64)(unsigned long)qhp->wq.rq.bar2_pa;
+               rq_db_key_mm->len = PAGE_SIZE;
+               insert_mmap(ucontext, rq_db_key_mm);
+               if (ma_sync_key_mm) {
+                       ma_sync_key_mm->key = uresp.ma_sync_key;
+                       ma_sync_key_mm->addr =
+                               (pci_resource_start(rhp->rdev.lldi.pdev, 0) +
+                               PCIE_MA_SYNC_A) & PAGE_MASK;
+                       ma_sync_key_mm->len = PAGE_SIZE;
+                       insert_mmap(ucontext, ma_sync_key_mm);
                }
        }
        qhp->ibqp.qp_num = qhp->wq.sq.qid;
@@ -1795,15 +1798,15 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
             qhp->wq.rq.memsize, attrs->cap.max_recv_wr);
        return &qhp->ibqp;
 err8:
-       kfree(mm5);
+       kfree(ma_sync_key_mm);
 err7:
-       kfree(mm4);
+       kfree(rq_db_key_mm);
 err6:
-       kfree(mm3);
+       kfree(sq_db_key_mm);
 err5:
-       kfree(mm2);
+       kfree(rq_key_mm);
 err4:
-       kfree(mm1);
+       kfree(sq_key_mm);
 err3:
        remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
 err2:
@@ -1888,3 +1891,17 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
        init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
        return 0;
 }
+
+void c4iw_drain_sq(struct ib_qp *ibqp)
+{
+       struct c4iw_qp *qp = to_c4iw_qp(ibqp);
+
+       wait_for_completion(&qp->sq_drained);
+}
+
+void c4iw_drain_rq(struct ib_qp *ibqp)
+{
+       struct c4iw_qp *qp = to_c4iw_qp(ibqp);
+
+       wait_for_completion(&qp->rq_drained);
+}
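
c4iw_drain_sq() and c4iw_drain_rq() simply sleep on the sq_drained/rq_drained completions initialised in c4iw_create_qp() above; the counterpart that signals them lives in the CQ poll path, which is not part of this excerpt. A rough sketch of that signalling side, under the assumption that the existing t4_sq_empty()/t4_rq_empty() helpers are used:

        #include "iw_cxgb4.h"

        /* Sketch of the completion side (hypothetical function name): invoked
         * while polling CQEs for a QP that has been moved to the error state
         * and flushed, so the waiters above can return. */
        static void example_complete_drain(struct c4iw_qp *qhp)
        {
                if (t4_sq_empty(&qhp->wq))
                        complete(&qhp->sq_drained);
                if (t4_rq_empty(&qhp->wq))
                        complete(&qhp->rq_drained);
        }
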
diff --git a/drivers/infiniband/hw/i40iw/Kconfig b/drivers/infiniband/hw/i40iw/Kconfig
new file mode 100644 (file)
index 0000000..6e7d27a
--- /dev/null
@@ -0,0 +1,7 @@
+config INFINIBAND_I40IW
+       tristate "Intel(R) Ethernet X722 iWARP Driver"
+       depends on INET && I40E
+       select GENERIC_ALLOCATOR
+       ---help---
+       Intel(R) Ethernet X722 iWARP Driver
+       INET && I40IW && INFINIBAND && I40E
diff --git a/drivers/infiniband/hw/i40iw/Makefile b/drivers/infiniband/hw/i40iw/Makefile
new file mode 100644 (file)
index 0000000..90068c0
--- /dev/null
@@ -0,0 +1,9 @@
+ccflags-y :=  -Idrivers/net/ethernet/intel/i40e
+
+obj-$(CONFIG_INFINIBAND_I40IW) += i40iw.o
+
+i40iw-objs :=\
+               i40iw_cm.o i40iw_ctrl.o \
+               i40iw_hmc.o i40iw_hw.o i40iw_main.o  \
+               i40iw_pble.o i40iw_puda.o i40iw_uk.o i40iw_utils.o \
+               i40iw_verbs.o i40iw_virtchnl.o i40iw_vf.o
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
new file mode 100644 (file)
index 0000000..8197676
--- /dev/null
@@ -0,0 +1,570 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses.  You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+*   Redistribution and use in source and binary forms, with or
+*   without modification, are permitted provided that the following
+*   conditions are met:
+*
+*    - Redistributions of source code must retain the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer.
+*
+*    - Redistributions in binary form must reproduce the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer in the documentation and/or other materials
+*      provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_IW_H
+#define I40IW_IW_H
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/spinlock.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/crc32c.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_pack.h>
+#include <rdma/rdma_cm.h>
+#include <rdma/iw_cm.h>
+#include <rdma/iw_portmap.h>
+#include <rdma/rdma_netlink.h>
+#include <crypto/hash.h>
+
+#include "i40iw_status.h"
+#include "i40iw_osdep.h"
+#include "i40iw_d.h"
+#include "i40iw_hmc.h"
+
+#include <i40e_client.h>
+#include "i40iw_type.h"
+#include "i40iw_p.h"
+#include "i40iw_ucontext.h"
+#include "i40iw_pble.h"
+#include "i40iw_verbs.h"
+#include "i40iw_cm.h"
+#include "i40iw_user.h"
+#include "i40iw_puda.h"
+
+#define I40IW_FW_VERSION  2
+#define I40IW_HW_VERSION  2
+
+#define I40IW_ARP_ADD     1
+#define I40IW_ARP_DELETE  2
+#define I40IW_ARP_RESOLVE 3
+
+#define I40IW_MACIP_ADD     1
+#define I40IW_MACIP_DELETE  2
+
+#define IW_CCQ_SIZE         (I40IW_CQP_SW_SQSIZE_2048 + 1)
+#define IW_CEQ_SIZE         2048
+#define IW_AEQ_SIZE         2048
+
+#define RX_BUF_SIZE            (1536 + 8)
+#define IW_REG0_SIZE           (4 * 1024)
+#define IW_TX_TIMEOUT          (6 * HZ)
+#define IW_FIRST_QPN           1
+#define IW_SW_CONTEXT_ALIGN    1024
+
+#define MAX_DPC_ITERATIONS             128
+
+#define I40IW_EVENT_TIMEOUT            100000
+#define I40IW_VCHNL_EVENT_TIMEOUT      100000
+
+#define        I40IW_NO_VLAN                   0xffff
+#define        I40IW_NO_QSET                   0xffff
+
+/* access to mcast filter list */
+#define IW_ADD_MCAST false
+#define IW_DEL_MCAST true
+
+#define I40IW_DRV_OPT_ENABLE_MPA_VER_0     0x00000001
+#define I40IW_DRV_OPT_DISABLE_MPA_CRC      0x00000002
+#define I40IW_DRV_OPT_DISABLE_FIRST_WRITE  0x00000004
+#define I40IW_DRV_OPT_DISABLE_INTF         0x00000008
+#define I40IW_DRV_OPT_ENABLE_MSI           0x00000010
+#define I40IW_DRV_OPT_DUAL_LOGICAL_PORT    0x00000020
+#define I40IW_DRV_OPT_NO_INLINE_DATA       0x00000080
+#define I40IW_DRV_OPT_DISABLE_INT_MOD      0x00000100
+#define I40IW_DRV_OPT_DISABLE_VIRT_WQ      0x00000200
+#define I40IW_DRV_OPT_ENABLE_PAU           0x00000400
+#define I40IW_DRV_OPT_MCAST_LOGPORT_MAP    0x00000800
+
+#define IW_HMC_OBJ_TYPE_NUM ARRAY_SIZE(iw_hmc_obj_types)
+#define IW_CFG_FPM_QP_COUNT            32768
+
+#define I40IW_MTU_TO_MSS               40
+#define I40IW_DEFAULT_MSS              1460
+
+struct i40iw_cqp_compl_info {
+       u32 op_ret_val;
+       u16 maj_err_code;
+       u16 min_err_code;
+       bool error;
+       u8 op_code;
+};
+
+#define i40iw_pr_err(fmt, args ...) pr_err("%s: "fmt, __func__, ## args)
+
+#define i40iw_pr_info(fmt, args ...) pr_info("%s: " fmt, __func__, ## args)
+
+#define i40iw_pr_warn(fmt, args ...) pr_warn("%s: " fmt, __func__, ## args)
+
+struct i40iw_cqp_request {
+       struct cqp_commands_info info;
+       wait_queue_head_t waitq;
+       struct list_head list;
+       atomic_t refcount;
+       void (*callback_fcn)(struct i40iw_cqp_request*, u32);
+       void *param;
+       struct i40iw_cqp_compl_info compl_info;
+       bool waiting;
+       bool request_done;
+       bool dynamic;
+};
+
+struct i40iw_cqp {
+       struct i40iw_sc_cqp sc_cqp;
+       spinlock_t req_lock; /*cqp request list */
+       wait_queue_head_t waitq;
+       struct i40iw_dma_mem sq;
+       struct i40iw_dma_mem host_ctx;
+       u64 *scratch_array;
+       struct i40iw_cqp_request *cqp_requests;
+       struct list_head cqp_avail_reqs;
+       struct list_head cqp_pending_reqs;
+};
+
+struct i40iw_device;
+
+struct i40iw_ccq {
+       struct i40iw_sc_cq sc_cq;
+       spinlock_t lock; /* ccq control */
+       wait_queue_head_t waitq;
+       struct i40iw_dma_mem mem_cq;
+       struct i40iw_dma_mem shadow_area;
+};
+
+struct i40iw_ceq {
+       struct i40iw_sc_ceq sc_ceq;
+       struct i40iw_dma_mem mem;
+       u32 irq;
+       u32 msix_idx;
+       struct i40iw_device *iwdev;
+       struct tasklet_struct dpc_tasklet;
+};
+
+struct i40iw_aeq {
+       struct i40iw_sc_aeq sc_aeq;
+       struct i40iw_dma_mem mem;
+};
+
+struct i40iw_arp_entry {
+       u32 ip_addr[4];
+       u8 mac_addr[ETH_ALEN];
+};
+
+enum init_completion_state {
+       INVALID_STATE = 0,
+       INITIAL_STATE,
+       CQP_CREATED,
+       HMC_OBJS_CREATED,
+       PBLE_CHUNK_MEM,
+       CCQ_CREATED,
+       AEQ_CREATED,
+       CEQ_CREATED,
+       ILQ_CREATED,
+       IEQ_CREATED,
+       INET_NOTIFIER,
+       IP_ADDR_REGISTERED,
+       RDMA_DEV_REGISTERED
+};
+
+struct i40iw_msix_vector {
+       u32 idx;
+       u32 irq;
+       u32 cpu_affinity;
+       u32 ceq_id;
+};
+
+#define I40IW_MSIX_TABLE_SIZE   65
+
+struct virtchnl_work {
+       struct work_struct work;
+       union {
+               struct i40iw_cqp_request *cqp_request;
+               struct i40iw_virtchnl_work_info work_info;
+       };
+};
+
+struct i40e_qvlist_info;
+
+struct i40iw_device {
+       struct i40iw_ib_device *iwibdev;
+       struct net_device *netdev;
+       wait_queue_head_t vchnl_waitq;
+       struct i40iw_sc_dev sc_dev;
+       struct i40iw_handler *hdl;
+       struct i40e_info *ldev;
+       struct i40e_client *client;
+       struct i40iw_hw hw;
+       struct i40iw_cm_core cm_core;
+       unsigned long *mem_resources;
+       unsigned long *allocated_qps;
+       unsigned long *allocated_cqs;
+       unsigned long *allocated_mrs;
+       unsigned long *allocated_pds;
+       unsigned long *allocated_arps;
+       struct i40iw_qp **qp_table;
+       bool msix_shared;
+       u32 msix_count;
+       struct i40iw_msix_vector *iw_msixtbl;
+       struct i40e_qvlist_info *iw_qvlist;
+
+       struct i40iw_hmc_pble_rsrc *pble_rsrc;
+       struct i40iw_arp_entry *arp_table;
+       struct i40iw_cqp cqp;
+       struct i40iw_ccq ccq;
+       u32 ceqs_count;
+       struct i40iw_ceq *ceqlist;
+       struct i40iw_aeq aeq;
+       u32 arp_table_size;
+       u32 next_arp_index;
+       spinlock_t resource_lock; /* hw resource access */
+       u32 vendor_id;
+       u32 vendor_part_id;
+       u32 of_device_registered;
+
+       u32 device_cap_flags;
+       unsigned long db_start;
+       u8 resource_profile;
+       u8 max_rdma_vfs;
+       u8 max_enabled_vfs;
+       u8 max_sge;
+       u8 iw_status;
+       u8 send_term_ok;
+       bool push_mode;         /* Initialized from parameter passed to driver */
+
+       /* x710 specific */
+       struct mutex pbl_mutex;
+       struct tasklet_struct dpc_tasklet;
+       struct workqueue_struct *virtchnl_wq;
+       struct virtchnl_work virtchnl_w[I40IW_MAX_PE_ENABLED_VF_COUNT];
+       struct i40iw_dma_mem obj_mem;
+       struct i40iw_dma_mem obj_next;
+       u8 *hmc_info_mem;
+       u32 sd_type;
+       struct workqueue_struct *param_wq;
+       atomic_t params_busy;
+       u32 mss;
+       enum init_completion_state init_state;
+       u16 mac_ip_table_idx;
+       atomic_t vchnl_msgs;
+       u32 max_mr;
+       u32 max_qp;
+       u32 max_cq;
+       u32 max_pd;
+       u32 next_qp;
+       u32 next_cq;
+       u32 next_pd;
+       u32 max_mr_size;
+       u32 max_qp_wr;
+       u32 max_cqe;
+       u32 mr_stagmask;
+       u32 mpa_version;
+       bool dcb;
+};
+
+struct i40iw_ib_device {
+       struct ib_device ibdev;
+       struct i40iw_device *iwdev;
+};
+
+struct i40iw_handler {
+       struct list_head list;
+       struct i40e_client *client;
+       struct i40iw_device device;
+       struct i40e_info ldev;
+};
+
+/**
+ * to_iwdev - get device
+ * @ibdev: ib device
+ **/
+static inline struct i40iw_device *to_iwdev(struct ib_device *ibdev)
+{
+       return container_of(ibdev, struct i40iw_ib_device, ibdev)->iwdev;
+}
+
+/**
+ * to_ucontext - get user context
+ * @ibucontext: ib user context
+ **/
+static inline struct i40iw_ucontext *to_ucontext(struct ib_ucontext *ibucontext)
+{
+       return container_of(ibucontext, struct i40iw_ucontext, ibucontext);
+}
+
+/**
+ * to_iwpd - get protection domain
+ * @ibpd: ib pd
+ **/
+static inline struct i40iw_pd *to_iwpd(struct ib_pd *ibpd)
+{
+       return container_of(ibpd, struct i40iw_pd, ibpd);
+}
+
+/**
+ * to_iwmr - get device memory region
+ * @ibdev: ib memory region
+ **/
+static inline struct i40iw_mr *to_iwmr(struct ib_mr *ibmr)
+{
+       return container_of(ibmr, struct i40iw_mr, ibmr);
+}
+
+/**
+ * to_iwmr_from_ibfmr - get device memory region
+ * @ibfmr: ib fmr
+ **/
+static inline struct i40iw_mr *to_iwmr_from_ibfmr(struct ib_fmr *ibfmr)
+{
+       return container_of(ibfmr, struct i40iw_mr, ibfmr);
+}
+
+/**
+ * to_iwmw - get device memory window
+ * @ibmw: ib memory window
+ **/
+static inline struct i40iw_mr *to_iwmw(struct ib_mw *ibmw)
+{
+       return container_of(ibmw, struct i40iw_mr, ibmw);
+}
+
+/**
+ * to_iwcq - get completion queue
+ * @ibcq: ib cq device
+ **/
+static inline struct i40iw_cq *to_iwcq(struct ib_cq *ibcq)
+{
+       return container_of(ibcq, struct i40iw_cq, ibcq);
+}
+
+/**
+ * to_iwqp - get device qp
+ * @ibqp: ib qp
+ **/
+static inline struct i40iw_qp *to_iwqp(struct ib_qp *ibqp)
+{
+       return container_of(ibqp, struct i40iw_qp, ibqp);
+}
+
+/* i40iw.c */
+void i40iw_add_ref(struct ib_qp *);
+void i40iw_rem_ref(struct ib_qp *);
+struct ib_qp *i40iw_get_qp(struct ib_device *, int);
+
+void i40iw_flush_wqes(struct i40iw_device *iwdev,
+                     struct i40iw_qp *qp);
+
+void i40iw_manage_arp_cache(struct i40iw_device *iwdev,
+                           unsigned char *mac_addr,
+                           __be32 *ip_addr,
+                           bool ipv4,
+                           u32 action);
+
+int i40iw_manage_apbvt(struct i40iw_device *iwdev,
+                      u16 accel_local_port,
+                      bool add_port);
+
+struct i40iw_cqp_request *i40iw_get_cqp_request(struct i40iw_cqp *cqp, bool wait);
+void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request);
+void i40iw_put_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request);
+
+/**
+ * i40iw_alloc_resource - allocate a resource
+ * @iwdev: device pointer
+ * @resource_array: resource bit array
+ * @max_resources: maximum resource number
+ * @req_resource_num: allocated resource number
+ * @next: next free id
+ **/
+static inline int i40iw_alloc_resource(struct i40iw_device *iwdev,
+                                      unsigned long *resource_array,
+                                      u32 max_resources,
+                                      u32 *req_resource_num,
+                                      u32 *next)
+{
+       u32 resource_num;
+       unsigned long flags;
+
+       spin_lock_irqsave(&iwdev->resource_lock, flags);
+       resource_num = find_next_zero_bit(resource_array, max_resources, *next);
+       if (resource_num >= max_resources) {
+               resource_num = find_first_zero_bit(resource_array, max_resources);
+               if (resource_num >= max_resources) {
+                       spin_unlock_irqrestore(&iwdev->resource_lock, flags);
+                       return -EOVERFLOW;
+               }
+       }
+       set_bit(resource_num, resource_array);
+       *next = resource_num + 1;
+       if (*next == max_resources)
+               *next = 0;
+       spin_unlock_irqrestore(&iwdev->resource_lock, flags);
+       *req_resource_num = resource_num;
+
+       return 0;
+}
+
+/**
+ * i40iw_is_resource_allocated - determine if resource is
+ * allocated
+ * @iwdev: device pointer
+ * @resource_array: resource array for the resource_num
+ * @resource_num: resource number to check
+ **/
+static inline bool i40iw_is_resource_allocated(struct i40iw_device *iwdev,
+                                              unsigned long *resource_array,
+                                              u32 resource_num)
+{
+       bool bit_is_set;
+       unsigned long flags;
+
+       spin_lock_irqsave(&iwdev->resource_lock, flags);
+
+       bit_is_set = test_bit(resource_num, resource_array);
+       spin_unlock_irqrestore(&iwdev->resource_lock, flags);
+
+       return bit_is_set;
+}
+
+/**
+ * i40iw_free_resource - free a resource
+ * @iwdev: device pointer
+ * @resource_array: resource array for the resource_num
+ * @resource_num: resource number to free
+ **/
+static inline void i40iw_free_resource(struct i40iw_device *iwdev,
+                                      unsigned long *resource_array,
+                                      u32 resource_num)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&iwdev->resource_lock, flags);
+       clear_bit(resource_num, resource_array);
+       spin_unlock_irqrestore(&iwdev->resource_lock, flags);
+}
+
+/**
+ * to_iwhdl - Get the handler from the device pointer
+ * @iwdev: device pointer
+ **/
+static inline struct i40iw_handler *to_iwhdl(struct i40iw_device *iw_dev)
+{
+       return container_of(iw_dev, struct i40iw_handler, device);
+}
+
+struct i40iw_handler *i40iw_find_netdev(struct net_device *netdev);
+
+/**
+ * i40iw_initialize_hw_resources - initialize hw resources
+ */
+u32 i40iw_initialize_hw_resources(struct i40iw_device *iwdev);
+
+int i40iw_register_rdma_device(struct i40iw_device *iwdev);
+void i40iw_port_ibevent(struct i40iw_device *iwdev);
+int i40iw_cm_disconn(struct i40iw_qp *);
+void i40iw_cm_disconn_worker(void *);
+int mini_cm_recv_pkt(struct i40iw_cm_core *, struct i40iw_device *,
+                    struct sk_buff *);
+
+enum i40iw_status_code i40iw_handle_cqp_op(struct i40iw_device *iwdev,
+                                          struct i40iw_cqp_request *cqp_request);
+enum i40iw_status_code i40iw_add_mac_addr(struct i40iw_device *iwdev,
+                                         u8 *mac_addr, u8 *mac_index);
+int i40iw_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *);
+
+void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev);
+void i40iw_add_pdusecount(struct i40iw_pd *iwpd);
+void i40iw_hw_modify_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp,
+                       struct i40iw_modify_qp_info *info, bool wait);
+
+enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
+                                         struct i40iw_cm_info *cminfo,
+                                         enum i40iw_quad_entry_type etype,
+                                         enum i40iw_quad_hash_manage_type mtype,
+                                         void *cmnode,
+                                         bool wait);
+void i40iw_receive_ilq(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *rbuf);
+void i40iw_free_sqbuf(struct i40iw_sc_dev *dev, void *bufp);
+void i40iw_free_qp_resources(struct i40iw_device *iwdev,
+                            struct i40iw_qp *iwqp,
+                            u32 qp_num);
+enum i40iw_status_code i40iw_obj_aligned_mem(struct i40iw_device *iwdev,
+                                            struct i40iw_dma_mem *memptr,
+                                            u32 size, u32 mask);
+
+void i40iw_request_reset(struct i40iw_device *iwdev);
+void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev);
+void i40iw_setup_cm_core(struct i40iw_device *iwdev);
+void i40iw_cleanup_cm_core(struct i40iw_cm_core *cm_core);
+void i40iw_process_ceq(struct i40iw_device *, struct i40iw_ceq *iwceq);
+void i40iw_process_aeq(struct i40iw_device *);
+void i40iw_next_iw_state(struct i40iw_qp *iwqp,
+                        u8 state, u8 del_hash,
+                        u8 term, u8 term_len);
+int i40iw_send_syn(struct i40iw_cm_node *cm_node, u32 sendack);
+struct i40iw_cm_node *i40iw_find_node(struct i40iw_cm_core *cm_core,
+                                     u16 rem_port,
+                                     u32 *rem_addr,
+                                     u16 loc_port,
+                                     u32 *loc_addr,
+                                     bool add_refcnt);
+
+enum i40iw_status_code i40iw_hw_flush_wqes(struct i40iw_device *iwdev,
+                                          struct i40iw_sc_qp *qp,
+                                          struct i40iw_qp_flush_info *info,
+                                          bool wait);
+
+void i40iw_copy_ip_ntohl(u32 *dst, u32 *src);
+struct ib_mr *i40iw_reg_phys_mr(struct ib_pd *ib_pd,
+                               u64 addr,
+                               u64 size,
+                               int acc,
+                               u64 *iova_start);
+
+int i40iw_inetaddr_event(struct notifier_block *notifier,
+                        unsigned long event,
+                        void *ptr);
+int i40iw_inet6addr_event(struct notifier_block *notifier,
+                         unsigned long event,
+                         void *ptr);
+int i40iw_net_event(struct notifier_block *notifier,
+                   unsigned long event,
+                   void *ptr);
+
+#endif
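
i40iw.h above tracks per-resource id allocation in plain bitmaps guarded by resource_lock: i40iw_alloc_resource() scans from the rotating *next hint, wraps to the start of the bitmap before giving up, and i40iw_free_resource() clears the bit again. A minimal usage sketch built only from the declarations above (the example function name is hypothetical):

        #include "i40iw.h"

        /* Reserve a QP id from the bitmap and give it back when done.
         * -EOVERFLOW means every id is already in use. */
        static int example_cycle_qp_id(struct i40iw_device *iwdev)
        {
                u32 qp_num;
                int err;

                err = i40iw_alloc_resource(iwdev, iwdev->allocated_qps, iwdev->max_qp,
                                           &qp_num, &iwdev->next_qp);
                if (err)
                        return err;

                /* ... qp_num is owned here until the QP is destroyed ... */

                i40iw_free_resource(iwdev, iwdev->allocated_qps, qp_num);
                return 0;
        }
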
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
new file mode 100644 (file)
index 0000000..92745d7
--- /dev/null
@@ -0,0 +1,4141 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses.  You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+*   Redistribution and use in source and binary forms, with or
+*   without modification, are permitted provided that the following
+*   conditions are met:
+*
+*    - Redistributions of source code must retain the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer.
+*
+*    - Redistributions in binary form must reproduce the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer in the documentation and/or other materials
+*      provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#include <linux/atomic.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/init.h>
+#include <linux/if_arp.h>
+#include <linux/if_vlan.h>
+#include <linux/notifier.h>
+#include <linux/net.h>
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/random.h>
+#include <linux/list.h>
+#include <linux/threads.h>
+#include <linux/highmem.h>
+#include <net/arp.h>
+#include <net/ndisc.h>
+#include <net/neighbour.h>
+#include <net/route.h>
+#include <net/addrconf.h>
+#include <net/ip6_route.h>
+#include <net/ip_fib.h>
+#include <net/tcp.h>
+#include <asm/checksum.h>
+
+#include "i40iw.h"
+
+static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *);
+static void i40iw_cm_post_event(struct i40iw_cm_event *event);
+static void i40iw_disconnect_worker(struct work_struct *work);
+
+/**
+ * i40iw_free_sqbuf - put back puda buffer if refcount = 0
+ * @dev: FPK device
+ * @buf: puda buffer to free
+ */
+void i40iw_free_sqbuf(struct i40iw_sc_dev *dev, void *bufp)
+{
+       struct i40iw_puda_buf *buf = (struct i40iw_puda_buf *)bufp;
+       struct i40iw_puda_rsrc *ilq = dev->ilq;
+
+       if (!atomic_dec_return(&buf->refcount))
+               i40iw_puda_ret_bufpool(ilq, buf);
+}
+
+/**
+ * i40iw_derive_hw_ird_setting - Calculate IRD
+ *
+ * @cm_ird: IRD of connection's node
+ *
+ * The ird from the connection is rounded to a supported HW
+ * setting (2,8,32,64) and then encoded for ird_size field of
+ * qp_ctx
+ */
+static u8 i40iw_derive_hw_ird_setting(u16 cm_ird)
+{
+       u8 encoded_ird_size;
+       u8 pof2_cm_ird = 1;
+
+       /* round off to the next power of 2 */
+       while (pof2_cm_ird < cm_ird)
+               pof2_cm_ird *= 2;
+
+       /* ird_size field is encoded in qp_ctx */
+       switch (pof2_cm_ird) {
+       case I40IW_HW_IRD_SETTING_64:
+               encoded_ird_size = 3;
+               break;
+       case I40IW_HW_IRD_SETTING_32:
+       case I40IW_HW_IRD_SETTING_16:
+               encoded_ird_size = 2;
+               break;
+       case I40IW_HW_IRD_SETTING_8:
+       case I40IW_HW_IRD_SETTING_4:
+               encoded_ird_size = 1;
+               break;
+       case I40IW_HW_IRD_SETTING_2:
+       default:
+               encoded_ird_size = 0;
+               break;
+       }
+       return encoded_ird_size;
+}
+
+/**
+ * i40iw_record_ird_ord - Record IRD/ORD passed in
+ * @cm_node: connection's node
+ * @conn_ird: connection IRD
+ * @conn_ord: connection ORD
+ */
+static void i40iw_record_ird_ord(struct i40iw_cm_node *cm_node, u16 conn_ird, u16 conn_ord)
+{
+       if (conn_ird > I40IW_MAX_IRD_SIZE)
+               conn_ird = I40IW_MAX_IRD_SIZE;
+
+       if (conn_ord > I40IW_MAX_ORD_SIZE)
+               conn_ord = I40IW_MAX_ORD_SIZE;
+
+       cm_node->ird_size = conn_ird;
+       cm_node->ord_size = conn_ord;
+}
+
+/**
+ * i40iw_copy_ip_ntohl - change network to host ip
+ * @dst: host ip
+ * @src: big endian
+ */
+void i40iw_copy_ip_ntohl(u32 *dst, __be32 *src)
+{
+       *dst++ = ntohl(*src++);
+       *dst++ = ntohl(*src++);
+       *dst++ = ntohl(*src++);
+       *dst = ntohl(*src);
+}
+
+/**
+ * i40iw_copy_ip_htonl - change host addr to network ip
+ * @dst: big endian
+ * @src: host ip
+ */
+static inline void i40iw_copy_ip_htonl(__be32 *dst, u32 *src)
+{
+       *dst++ = htonl(*src++);
+       *dst++ = htonl(*src++);
+       *dst++ = htonl(*src++);
+       *dst = htonl(*src);
+}
+
+/**
+ * i40iw_fill_sockaddr4 - get addr info for passive connection
+ * @cm_node: connection's node
+ * @event: upper layer's cm event
+ */
+static inline void i40iw_fill_sockaddr4(struct i40iw_cm_node *cm_node,
+                                       struct iw_cm_event *event)
+{
+       struct sockaddr_in *laddr = (struct sockaddr_in *)&event->local_addr;
+       struct sockaddr_in *raddr = (struct sockaddr_in *)&event->remote_addr;
+
+       laddr->sin_family = AF_INET;
+       raddr->sin_family = AF_INET;
+
+       laddr->sin_port = htons(cm_node->loc_port);
+       raddr->sin_port = htons(cm_node->rem_port);
+
+       laddr->sin_addr.s_addr = htonl(cm_node->loc_addr[0]);
+       raddr->sin_addr.s_addr = htonl(cm_node->rem_addr[0]);
+}
+
+/**
+ * i40iw_fill_sockaddr6 - get ipv6 addr info for passive side
+ * @cm_node: connection's node
+ * @event: upper layer's cm event
+ */
+static inline void i40iw_fill_sockaddr6(struct i40iw_cm_node *cm_node,
+                                       struct iw_cm_event *event)
+{
+       struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)&event->local_addr;
+       struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)&event->remote_addr;
+
+       laddr6->sin6_family = AF_INET6;
+       raddr6->sin6_family = AF_INET6;
+
+       laddr6->sin6_port = htons(cm_node->loc_port);
+       raddr6->sin6_port = htons(cm_node->rem_port);
+
+       i40iw_copy_ip_htonl(laddr6->sin6_addr.in6_u.u6_addr32,
+                           cm_node->loc_addr);
+       i40iw_copy_ip_htonl(raddr6->sin6_addr.in6_u.u6_addr32,
+                           cm_node->rem_addr);
+}
+
+/**
+ * i40iw_get_addr_info - copy ip/tcp info from cm_node
+ * @cm_node: contains ip/tcp info
+ * @cm_info: to get a copy of the cm_node ip/tcp info
+ */
+static void i40iw_get_addr_info(struct i40iw_cm_node *cm_node,
+                               struct i40iw_cm_info *cm_info)
+{
+       cm_info->ipv4 = cm_node->ipv4;
+       cm_info->vlan_id = cm_node->vlan_id;
+       memcpy(cm_info->loc_addr, cm_node->loc_addr, sizeof(cm_info->loc_addr));
+       memcpy(cm_info->rem_addr, cm_node->rem_addr, sizeof(cm_info->rem_addr));
+       cm_info->loc_port = cm_node->loc_port;
+       cm_info->rem_port = cm_node->rem_port;
+}
+
+/**
+ * i40iw_get_cmevent_info - for cm event upcall
+ * @cm_node: connection's node
+ * @cm_id: upper layers cm struct for the event
+ * @event: upper layer's cm event
+ */
+static inline void i40iw_get_cmevent_info(struct i40iw_cm_node *cm_node,
+                                         struct iw_cm_id *cm_id,
+                                         struct iw_cm_event *event)
+{
+       memcpy(&event->local_addr, &cm_id->m_local_addr,
+              sizeof(event->local_addr));
+       memcpy(&event->remote_addr, &cm_id->m_remote_addr,
+              sizeof(event->remote_addr));
+       if (cm_node) {
+               event->private_data = (void *)cm_node->pdata_buf;
+               event->private_data_len = (u8)cm_node->pdata.size;
+               event->ird = cm_node->ird_size;
+               event->ord = cm_node->ord_size;
+       }
+}
+
+/**
+ * i40iw_send_cm_event - upcall cm's event handler
+ * @cm_node: connection's node
+ * @cm_id: upper layer's cm info struct
+ * @type: Event type to indicate
+ * @status: status for the event type
+ */
+static int i40iw_send_cm_event(struct i40iw_cm_node *cm_node,
+                              struct iw_cm_id *cm_id,
+                              enum iw_cm_event_type type,
+                              int status)
+{
+       struct iw_cm_event event;
+
+       memset(&event, 0, sizeof(event));
+       event.event = type;
+       event.status = status;
+       switch (type) {
+       case IW_CM_EVENT_CONNECT_REQUEST:
+               if (cm_node->ipv4)
+                       i40iw_fill_sockaddr4(cm_node, &event);
+               else
+                       i40iw_fill_sockaddr6(cm_node, &event);
+               event.provider_data = (void *)cm_node;
+               event.private_data = (void *)cm_node->pdata_buf;
+               event.private_data_len = (u8)cm_node->pdata.size;
+               break;
+       case IW_CM_EVENT_CONNECT_REPLY:
+               i40iw_get_cmevent_info(cm_node, cm_id, &event);
+               break;
+       case IW_CM_EVENT_ESTABLISHED:
+               event.ird = cm_node->ird_size;
+               event.ord = cm_node->ord_size;
+               break;
+       case IW_CM_EVENT_DISCONNECT:
+               break;
+       case IW_CM_EVENT_CLOSE:
+               break;
+       default:
+               i40iw_pr_err("event type received type = %d\n", type);
+               return -1;
+       }
+       return cm_id->event_handler(cm_id, &event);
+}
+
+/**
+ * i40iw_create_event - create cm event
+ * @cm_node: connection's node
+ * @type: Event type to generate
+ */
+static struct i40iw_cm_event *i40iw_create_event(struct i40iw_cm_node *cm_node,
+                                                enum i40iw_cm_event_type type)
+{
+       struct i40iw_cm_event *event;
+
+       if (!cm_node->cm_id)
+               return NULL;
+
+       event = kzalloc(sizeof(*event), GFP_ATOMIC);
+
+       if (!event)
+               return NULL;
+
+       event->type = type;
+       event->cm_node = cm_node;
+       memcpy(event->cm_info.rem_addr, cm_node->rem_addr, sizeof(event->cm_info.rem_addr));
+       memcpy(event->cm_info.loc_addr, cm_node->loc_addr, sizeof(event->cm_info.loc_addr));
+       event->cm_info.rem_port = cm_node->rem_port;
+       event->cm_info.loc_port = cm_node->loc_port;
+       event->cm_info.cm_id = cm_node->cm_id;
+
+       i40iw_debug(cm_node->dev,
+                   I40IW_DEBUG_CM,
+                   "node=%p event=%p type=%u dst=%pI4 src=%pI4\n",
+                   cm_node,
+                   event,
+                   type,
+                   event->cm_info.loc_addr,
+                   event->cm_info.rem_addr);
+
+       i40iw_cm_post_event(event);
+       return event;
+}
+
+/**
+ * i40iw_free_retrans_entry - free send entry
+ * @cm_node: connection's node
+ */
+static void i40iw_free_retrans_entry(struct i40iw_cm_node *cm_node)
+{
+       struct i40iw_sc_dev *dev = cm_node->dev;
+       struct i40iw_timer_entry *send_entry;
+
+       send_entry = cm_node->send_entry;
+       if (send_entry) {
+               cm_node->send_entry = NULL;
+               i40iw_free_sqbuf(dev, (void *)send_entry->sqbuf);
+               kfree(send_entry);
+               atomic_dec(&cm_node->ref_count);
+       }
+}
+
+/**
+ * i40iw_cleanup_retrans_entry - free send entry with lock
+ * @cm_node: connection's node
+ */
+static void i40iw_cleanup_retrans_entry(struct i40iw_cm_node *cm_node)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+       i40iw_free_retrans_entry(cm_node);
+       spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+}
+
+static bool is_remote_ne020_or_chelsio(struct i40iw_cm_node *cm_node)
+{
+       if ((cm_node->rem_mac[0] == 0x0) &&
+           (((cm_node->rem_mac[1] == 0x12) && (cm_node->rem_mac[2] == 0x55)) ||
+            ((cm_node->rem_mac[1] == 0x07 && (cm_node->rem_mac[2] == 0x43)))))
+               return true;
+       return false;
+}
+
+/**
+ * i40iw_form_cm_frame - get a free packet and build frame
+ * @cm_node: connection's node info to use in frame
+ * @options: pointer to options info
+ * @hdr: pointer mpa header
+ * @pdata: pointer to private data
+ * @flags:  indicates FIN or ACK
+ */
+static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
+                                                 struct i40iw_kmem_info *options,
+                                                 struct i40iw_kmem_info *hdr,
+                                                 struct i40iw_kmem_info *pdata,
+                                                 u8 flags)
+{
+       struct i40iw_puda_buf *sqbuf;
+       struct i40iw_sc_dev *dev = cm_node->dev;
+       u8 *buf;
+
+       struct tcphdr *tcph;
+       struct iphdr *iph;
+       struct ipv6hdr *ip6h;
+       struct ethhdr *ethh;
+       u16 packetsize;
+       u16 eth_hlen = ETH_HLEN;
+       u32 opts_len = 0;
+       u32 pd_len = 0;
+       u32 hdr_len = 0;
+
+       sqbuf = i40iw_puda_get_bufpool(dev->ilq);
+       if (!sqbuf)
+               return NULL;
+       buf = sqbuf->mem.va;
+
+       if (options)
+               opts_len = (u32)options->size;
+
+       if (hdr)
+               hdr_len = hdr->size;
+
+       if (pdata) {
+               pd_len = pdata->size;
+               if (!is_remote_ne020_or_chelsio(cm_node))
+                       pd_len += MPA_ZERO_PAD_LEN;
+       }
+
+       if (cm_node->vlan_id < VLAN_TAG_PRESENT)
+               eth_hlen += 4;
+
+       if (cm_node->ipv4)
+               packetsize = sizeof(*iph) + sizeof(*tcph);
+       else
+               packetsize = sizeof(*ip6h) + sizeof(*tcph);
+       packetsize += opts_len + hdr_len + pd_len;
+
+       memset(buf, 0x00, eth_hlen + packetsize);
+
+       sqbuf->totallen = packetsize + eth_hlen;
+       sqbuf->maclen = eth_hlen;
+       sqbuf->tcphlen = sizeof(*tcph) + opts_len;
+       sqbuf->scratch = (void *)cm_node;
+
+       ethh = (struct ethhdr *)buf;
+       buf += eth_hlen;
+
+       if (cm_node->ipv4) {
+               sqbuf->ipv4 = true;
+
+               iph = (struct iphdr *)buf;
+               buf += sizeof(*iph);
+               tcph = (struct tcphdr *)buf;
+               buf += sizeof(*tcph);
+
+               ether_addr_copy(ethh->h_dest, cm_node->rem_mac);
+               ether_addr_copy(ethh->h_source, cm_node->loc_mac);
+               if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
+                       ((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q);
+                       ((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(cm_node->vlan_id);
+
+                       ((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto = htons(ETH_P_IP);
+               } else {
+                       ethh->h_proto = htons(ETH_P_IP);
+               }
+
+               iph->version = IPVERSION;
+               iph->ihl = 5;   /* 5 * 4-byte words, IP header len */
+               iph->tos = 0;
+               iph->tot_len = htons(packetsize);
+               iph->id = htons(++cm_node->tcp_cntxt.loc_id);
+
+               iph->frag_off = htons(0x4000);
+               iph->ttl = 0x40;
+               iph->protocol = IPPROTO_TCP;
+               iph->saddr = htonl(cm_node->loc_addr[0]);
+               iph->daddr = htonl(cm_node->rem_addr[0]);
+       } else {
+               sqbuf->ipv4 = false;
+               ip6h = (struct ipv6hdr *)buf;
+               buf += sizeof(*ip6h);
+               tcph = (struct tcphdr *)buf;
+               buf += sizeof(*tcph);
+
+               ether_addr_copy(ethh->h_dest, cm_node->rem_mac);
+               ether_addr_copy(ethh->h_source, cm_node->loc_mac);
+               if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
+                       ((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q);
+                       ((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(cm_node->vlan_id);
+                       ((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto = htons(ETH_P_IPV6);
+               } else {
+                       ethh->h_proto = htons(ETH_P_IPV6);
+               }
+               ip6h->version = 6;
+               ip6h->flow_lbl[0] = 0;
+               ip6h->flow_lbl[1] = 0;
+               ip6h->flow_lbl[2] = 0;
+               ip6h->payload_len = htons(packetsize - sizeof(*ip6h));
+               ip6h->nexthdr = 6;
+               ip6h->hop_limit = 128;
+               i40iw_copy_ip_htonl(ip6h->saddr.in6_u.u6_addr32,
+                                   cm_node->loc_addr);
+               i40iw_copy_ip_htonl(ip6h->daddr.in6_u.u6_addr32,
+                                   cm_node->rem_addr);
+       }
+
+       tcph->source = htons(cm_node->loc_port);
+       tcph->dest = htons(cm_node->rem_port);
+
+       tcph->seq = htonl(cm_node->tcp_cntxt.loc_seq_num);
+
+       if (flags & SET_ACK) {
+               cm_node->tcp_cntxt.loc_ack_num = cm_node->tcp_cntxt.rcv_nxt;
+               tcph->ack_seq = htonl(cm_node->tcp_cntxt.loc_ack_num);
+               tcph->ack = 1;
+       } else {
+               tcph->ack_seq = 0;
+       }
+
+       if (flags & SET_SYN) {
+               cm_node->tcp_cntxt.loc_seq_num++;
+               tcph->syn = 1;
+       } else {
+               cm_node->tcp_cntxt.loc_seq_num += hdr_len + pd_len;
+       }
+
+       if (flags & SET_FIN) {
+               cm_node->tcp_cntxt.loc_seq_num++;
+               tcph->fin = 1;
+       }
+
+       if (flags & SET_RST)
+               tcph->rst = 1;
+
+       tcph->doff = (u16)((sizeof(*tcph) + opts_len + 3) >> 2);
+       sqbuf->tcphlen = tcph->doff << 2;
+       tcph->window = htons(cm_node->tcp_cntxt.rcv_wnd);
+       tcph->urg_ptr = 0;
+
+       if (opts_len) {
+               memcpy(buf, options->addr, opts_len);
+               buf += opts_len;
+       }
+
+       if (hdr_len) {
+               memcpy(buf, hdr->addr, hdr_len);
+               buf += hdr_len;
+       }
+
+       if (pd_len)
+               memcpy(buf, pdata->addr, pd_len);
+
+       atomic_set(&sqbuf->refcount, 1);
+
+       return sqbuf;
+}
+
+/**
+ * i40iw_send_reset - Send RST packet
+ * @cm_node: connection's node
+ */
+static int i40iw_send_reset(struct i40iw_cm_node *cm_node)
+{
+       struct i40iw_puda_buf *sqbuf;
+       int flags = SET_RST | SET_ACK;
+
+       sqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, flags);
+       if (!sqbuf) {
+               i40iw_pr_err("no sqbuf\n");
+               return -1;
+       }
+
+       return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 0, 1);
+}
+
+/**
+ * i40iw_active_open_err - send event for active side cm error
+ * @cm_node: connection's node
+ * @reset: Flag to send reset or not
+ */
+static void i40iw_active_open_err(struct i40iw_cm_node *cm_node, bool reset)
+{
+       i40iw_cleanup_retrans_entry(cm_node);
+       cm_node->cm_core->stats_connect_errs++;
+       if (reset) {
+               i40iw_debug(cm_node->dev,
+                           I40IW_DEBUG_CM,
+                           "%s cm_node=%p state=%d\n",
+                           __func__,
+                           cm_node,
+                           cm_node->state);
+               atomic_inc(&cm_node->ref_count);
+               i40iw_send_reset(cm_node);
+       }
+
+       cm_node->state = I40IW_CM_STATE_CLOSED;
+       i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED);
+}
+
+/**
+ * i40iw_passive_open_err - handle passive side cm error
+ * @cm_node: connection's node
+ * @reset: send reset or just free cm_node
+ */
+static void i40iw_passive_open_err(struct i40iw_cm_node *cm_node, bool reset)
+{
+       i40iw_cleanup_retrans_entry(cm_node);
+       cm_node->cm_core->stats_passive_errs++;
+       cm_node->state = I40IW_CM_STATE_CLOSED;
+       i40iw_debug(cm_node->dev,
+                   I40IW_DEBUG_CM,
+                   "%s cm_node=%p state =%d\n",
+                   __func__,
+                   cm_node,
+                   cm_node->state);
+       if (reset)
+               i40iw_send_reset(cm_node);
+       else
+               i40iw_rem_ref_cm_node(cm_node);
+}
+
+/**
+ * i40iw_event_connect_error - to create connect error event
+ * @event: cm information for connect event
+ */
+static void i40iw_event_connect_error(struct i40iw_cm_event *event)
+{
+       struct i40iw_qp *iwqp;
+       struct iw_cm_id *cm_id;
+
+       cm_id = event->cm_node->cm_id;
+       if (!cm_id)
+               return;
+
+       iwqp = cm_id->provider_data;
+
+       if (!iwqp || !iwqp->iwdev)
+               return;
+
+       iwqp->cm_id = NULL;
+       cm_id->provider_data = NULL;
+       i40iw_send_cm_event(event->cm_node, cm_id,
+                           IW_CM_EVENT_CONNECT_REPLY,
+                           -ECONNRESET);
+       cm_id->rem_ref(cm_id);
+       i40iw_rem_ref_cm_node(event->cm_node);
+}
+
+/**
+ * i40iw_process_options
+ * @cm_node: connection's node
+ * @optionsloc: point to start of options
+ * @optionsize: size of all options
+ * @syn_packet: flag if syn packet
+ */
+static int i40iw_process_options(struct i40iw_cm_node *cm_node,
+                                u8 *optionsloc,
+                                u32 optionsize,
+                                u32 syn_packet)
+{
+       u32 tmp;
+       u32 offset = 0;
+       union all_known_options *all_options;
+       char got_mss_option = 0;
+
+       while (offset < optionsize) {
+               all_options = (union all_known_options *)(optionsloc + offset);
+               switch (all_options->as_base.optionnum) {
+               case OPTION_NUMBER_END:
+                       offset = optionsize;
+                       break;
+               case OPTION_NUMBER_NONE:
+                       offset += 1;
+                       continue;
+               case OPTION_NUMBER_MSS:
+                       i40iw_debug(cm_node->dev,
+                                   I40IW_DEBUG_CM,
+                                   "%s: MSS Length: %d Offset: %d Size: %d\n",
+                                   __func__,
+                                   all_options->as_mss.length,
+                                   offset,
+                                   optionsize);
+                       got_mss_option = 1;
+                       if (all_options->as_mss.length != 4)
+                               return -1;
+                       tmp = ntohs(all_options->as_mss.mss);
+                       if (tmp > 0 && tmp < cm_node->tcp_cntxt.mss)
+                               cm_node->tcp_cntxt.mss = tmp;
+                       break;
+               case OPTION_NUMBER_WINDOW_SCALE:
+                       cm_node->tcp_cntxt.snd_wscale =
+                           all_options->as_windowscale.shiftcount;
+                       break;
+               default:
+                       i40iw_debug(cm_node->dev,
+                                   I40IW_DEBUG_CM,
+                                   "TCP Option not understood: %x\n",
+                                   all_options->as_base.optionnum);
+                       break;
+               }
+               offset += all_options->as_base.length;
+       }
+       if (!got_mss_option && syn_packet)
+               cm_node->tcp_cntxt.mss = I40IW_CM_DEFAULT_MSS;
+       return 0;
+}
+
+/**
+ * i40iw_handle_tcp_options - process received TCP options
+ * @cm_node: connection's node
+ * @tcph: pointer tcp header
+ * @optionsize: size of options rcvd
+ * @passive: active or passive flag
+ */
+static int i40iw_handle_tcp_options(struct i40iw_cm_node *cm_node,
+                                   struct tcphdr *tcph,
+                                   int optionsize,
+                                   int passive)
+{
+       u8 *optionsloc = (u8 *)&tcph[1];
+
+       if (optionsize) {
+               if (i40iw_process_options(cm_node,
+                                         optionsloc,
+                                         optionsize,
+                                         (u32)tcph->syn)) {
+                       i40iw_debug(cm_node->dev,
+                                   I40IW_DEBUG_CM,
+                                   "%s: Node %p, Sending RESET\n",
+                                   __func__,
+                                   cm_node);
+                       if (passive)
+                               i40iw_passive_open_err(cm_node, true);
+                       else
+                               i40iw_active_open_err(cm_node, true);
+                       return -1;
+               }
+       }
+
+       cm_node->tcp_cntxt.snd_wnd = ntohs(tcph->window) <<
+           cm_node->tcp_cntxt.snd_wscale;
+
+       if (cm_node->tcp_cntxt.snd_wnd > cm_node->tcp_cntxt.max_snd_wnd)
+               cm_node->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.snd_wnd;
+       return 0;
+}
+
+/**
+ * i40iw_build_mpa_v1 - build a MPA V1 frame
+ * @cm_node: connection's node
+ * @start_addr: buffer start address
+ * @mpa_key: to do read0 or write0
+ */
+static void i40iw_build_mpa_v1(struct i40iw_cm_node *cm_node,
+                              void *start_addr,
+                              u8 mpa_key)
+{
+       struct ietf_mpa_v1 *mpa_frame = (struct ietf_mpa_v1 *)start_addr;
+
+       switch (mpa_key) {
+       case MPA_KEY_REQUEST:
+               memcpy(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE);
+               break;
+       case MPA_KEY_REPLY:
+               memcpy(mpa_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE);
+               break;
+       default:
+               break;
+       }
+       mpa_frame->flags = IETF_MPA_FLAGS_CRC;
+       mpa_frame->rev = cm_node->mpa_frame_rev;
+       mpa_frame->priv_data_len = htons(cm_node->pdata.size);
+}
+
+/**
+ * i40iw_build_mpa_v2 - build a MPA V2 frame
+ * @cm_node: connection's node
+ * @start_addr: buffer start address
+ * @mpa_key: to do read0 or write0
+ */
+static void i40iw_build_mpa_v2(struct i40iw_cm_node *cm_node,
+                              void *start_addr,
+                              u8 mpa_key)
+{
+       struct ietf_mpa_v2 *mpa_frame = (struct ietf_mpa_v2 *)start_addr;
+       struct ietf_rtr_msg *rtr_msg = &mpa_frame->rtr_msg;
+
+       /* initialize the upper 5 bytes of the frame */
+       i40iw_build_mpa_v1(cm_node, start_addr, mpa_key);
+       mpa_frame->flags |= IETF_MPA_V2_FLAG;
+       mpa_frame->priv_data_len += htons(IETF_RTR_MSG_SIZE);
+
+       /* initialize RTR msg */
+       if (cm_node->mpav2_ird_ord == IETF_NO_IRD_ORD) {
+               rtr_msg->ctrl_ird = IETF_NO_IRD_ORD;
+               rtr_msg->ctrl_ord = IETF_NO_IRD_ORD;
+       } else {
+               rtr_msg->ctrl_ird = (cm_node->ird_size > IETF_NO_IRD_ORD) ?
+                       IETF_NO_IRD_ORD : cm_node->ird_size;
+               rtr_msg->ctrl_ord = (cm_node->ord_size > IETF_NO_IRD_ORD) ?
+                       IETF_NO_IRD_ORD : cm_node->ord_size;
+       }
+
+       rtr_msg->ctrl_ird |= IETF_PEER_TO_PEER;
+       rtr_msg->ctrl_ird |= IETF_FLPDU_ZERO_LEN;
+
+       switch (mpa_key) {
+       case MPA_KEY_REQUEST:
+               rtr_msg->ctrl_ord |= IETF_RDMA0_WRITE;
+               rtr_msg->ctrl_ord |= IETF_RDMA0_READ;
+               break;
+       case MPA_KEY_REPLY:
+               switch (cm_node->send_rdma0_op) {
+               case SEND_RDMA_WRITE_ZERO:
+                       rtr_msg->ctrl_ord |= IETF_RDMA0_WRITE;
+                       break;
+               case SEND_RDMA_READ_ZERO:
+                       rtr_msg->ctrl_ord |= IETF_RDMA0_READ;
+                       break;
+               }
+               break;
+       default:
+               break;
+       }
+       rtr_msg->ctrl_ird = htons(rtr_msg->ctrl_ird);
+       rtr_msg->ctrl_ord = htons(rtr_msg->ctrl_ord);
+}
+
+/**
+ * i40iw_cm_build_mpa_frame - build mpa frame for mpa version 1 or version 2
+ * @cm_node: connection's node
+ * @mpa: mpa data buffer
+ * @mpa_key: to do read0 or write0
+ */
+static int i40iw_cm_build_mpa_frame(struct i40iw_cm_node *cm_node,
+                                   struct i40iw_kmem_info *mpa,
+                                   u8 mpa_key)
+{
+       int hdr_len = 0;
+
+       switch (cm_node->mpa_frame_rev) {
+       case IETF_MPA_V1:
+               hdr_len = sizeof(struct ietf_mpa_v1);
+               i40iw_build_mpa_v1(cm_node, mpa->addr, mpa_key);
+               break;
+       case IETF_MPA_V2:
+               hdr_len = sizeof(struct ietf_mpa_v2);
+               i40iw_build_mpa_v2(cm_node, mpa->addr, mpa_key);
+               break;
+       default:
+               break;
+       }
+
+       return hdr_len;
+}
+
+/**
+ * i40iw_send_mpa_request - active node send mpa request to passive node
+ * @cm_node: connection's node
+ */
+static int i40iw_send_mpa_request(struct i40iw_cm_node *cm_node)
+{
+       struct i40iw_puda_buf *sqbuf;
+
+       if (!cm_node) {
+               i40iw_pr_err("cm_node == NULL\n");
+               return -1;
+       }
+
+       cm_node->mpa_hdr.addr = &cm_node->mpa_frame;
+       cm_node->mpa_hdr.size = i40iw_cm_build_mpa_frame(cm_node,
+                                                        &cm_node->mpa_hdr,
+                                                        MPA_KEY_REQUEST);
+       if (!cm_node->mpa_hdr.size) {
+               i40iw_pr_err("mpa size = %d\n", cm_node->mpa_hdr.size);
+               return -1;
+       }
+
+       sqbuf = i40iw_form_cm_frame(cm_node,
+                                   NULL,
+                                   &cm_node->mpa_hdr,
+                                   &cm_node->pdata,
+                                   SET_ACK);
+       if (!sqbuf) {
+               i40iw_pr_err("sq_buf == NULL\n");
+               return -1;
+       }
+       return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);
+}
+
+/**
+ * i40iw_send_mpa_reject - send an MPA reject frame
+ * @cm_node: connection's node
+ * @pdata: reject data for connection
+ * @plen: length of reject data
+ */
+static int i40iw_send_mpa_reject(struct i40iw_cm_node *cm_node,
+                                const void *pdata,
+                                u8 plen)
+{
+       struct i40iw_puda_buf *sqbuf;
+       struct i40iw_kmem_info priv_info;
+
+       cm_node->mpa_hdr.addr = &cm_node->mpa_frame;
+       cm_node->mpa_hdr.size = i40iw_cm_build_mpa_frame(cm_node,
+                                                        &cm_node->mpa_hdr,
+                                                        MPA_KEY_REPLY);
+
+       cm_node->mpa_frame.flags |= IETF_MPA_FLAGS_REJECT;
+       priv_info.addr = (void *)pdata;
+       priv_info.size = plen;
+
+       sqbuf = i40iw_form_cm_frame(cm_node,
+                                   NULL,
+                                   &cm_node->mpa_hdr,
+                                   &priv_info,
+                                   SET_ACK | SET_FIN);
+       if (!sqbuf) {
+               i40iw_pr_err("no sqbuf\n");
+               return -ENOMEM;
+       }
+       cm_node->state = I40IW_CM_STATE_FIN_WAIT1;
+       return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);
+}
+
+/**
+ * i40iw_parse_mpa - process an IETF MPA frame
+ * @cm_node: connection's node
+ * @buffer: Data pointer
+ * @type: to return accept or reject
+ * @len: length of the mpa buffer
+ */
+static int i40iw_parse_mpa(struct i40iw_cm_node *cm_node, u8 *buffer, u32 *type, u32 len)
+{
+       struct ietf_mpa_v1 *mpa_frame;
+       struct ietf_mpa_v2 *mpa_v2_frame;
+       struct ietf_rtr_msg *rtr_msg;
+       int mpa_hdr_len;
+       int priv_data_len;
+
+       *type = I40IW_MPA_REQUEST_ACCEPT;
+
+       if (len < sizeof(struct ietf_mpa_v1)) {
+               i40iw_pr_err("ietf buffer small (%x)\n", len);
+               return -1;
+       }
+
+       mpa_frame = (struct ietf_mpa_v1 *)buffer;
+       mpa_hdr_len = sizeof(struct ietf_mpa_v1);
+       priv_data_len = ntohs(mpa_frame->priv_data_len);
+
+       if (priv_data_len > IETF_MAX_PRIV_DATA_LEN) {
+               i40iw_pr_err("large pri_data %d\n", priv_data_len);
+               return -1;
+       }
+       if (mpa_frame->rev != IETF_MPA_V1 && mpa_frame->rev != IETF_MPA_V2) {
+               i40iw_pr_err("unsupported mpa rev = %d\n", mpa_frame->rev);
+               return -1;
+       }
+       if (mpa_frame->rev > cm_node->mpa_frame_rev) {
+               i40iw_pr_err("rev %d\n", mpa_frame->rev);
+               return -1;
+       }
+       cm_node->mpa_frame_rev = mpa_frame->rev;
+
+       if (cm_node->state != I40IW_CM_STATE_MPAREQ_SENT) {
+               if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE)) {
+                       i40iw_pr_err("Unexpected MPA Key received\n");
+                       return -1;
+               }
+       } else {
+               if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE)) {
+                       i40iw_pr_err("Unexpected MPA Key received\n");
+                       return -1;
+               }
+       }
+
+       if (priv_data_len + mpa_hdr_len > len) {
+               i40iw_pr_err("ietf buffer len(%x + %x != %x)\n",
+                            priv_data_len, mpa_hdr_len, len);
+               return -1;
+       }
+       if (len > MAX_CM_BUFFER) {
+               i40iw_pr_err("ietf buffer large len = %d\n", len);
+               return -1;
+       }
+
+       switch (mpa_frame->rev) {
+       case IETF_MPA_V2:{
+                       u16 ird_size;
+                       u16 ord_size;
+                       u16 ctrl_ord;
+                       u16 ctrl_ird;
+
+                       mpa_v2_frame = (struct ietf_mpa_v2 *)buffer;
+                       mpa_hdr_len += IETF_RTR_MSG_SIZE;
+                       rtr_msg = &mpa_v2_frame->rtr_msg;
+
+                       /* parse rtr message */
+                       ctrl_ord = ntohs(rtr_msg->ctrl_ord);
+                       ctrl_ird = ntohs(rtr_msg->ctrl_ird);
+                       ird_size = ctrl_ird & IETF_NO_IRD_ORD;
+                       ord_size = ctrl_ord & IETF_NO_IRD_ORD;
+
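+                       /* a valid MPA v2 RTR message must have the peer-to-peer bit set */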
+                       if (!(ctrl_ird & IETF_PEER_TO_PEER))
+                               return -1;
+
+                       if (ird_size == IETF_NO_IRD_ORD || ord_size == IETF_NO_IRD_ORD) {
+                               cm_node->mpav2_ird_ord = IETF_NO_IRD_ORD;
+                               goto negotiate_done;
+                       }
+
+                       if (cm_node->state != I40IW_CM_STATE_MPAREQ_SENT) {
+                               /* responder */
+                               if (!ord_size && (ctrl_ord & IETF_RDMA0_READ))
+                                       cm_node->ird_size = 1;
+                               if (cm_node->ord_size > ird_size)
+                                       cm_node->ord_size = ird_size;
+                       } else {
+                               /* initiator */
+                               if (!ird_size && (ctrl_ord & IETF_RDMA0_READ))
+                                       return -1;
+                               if (cm_node->ord_size > ird_size)
+                                       cm_node->ord_size = ird_size;
+
+                               if (cm_node->ird_size < ord_size)
+                                       /* no resources available */
+                                       return -1;
+                       }
+
+negotiate_done:
+                       if (ctrl_ord & IETF_RDMA0_READ)
+                               cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
+                       else if (ctrl_ord & IETF_RDMA0_WRITE)
+                               cm_node->send_rdma0_op = SEND_RDMA_WRITE_ZERO;
+                       else    /* Not supported RDMA0 operation */
+                               return -1;
+                       i40iw_debug(cm_node->dev, I40IW_DEBUG_CM,
+                                   "MPAV2: Negotiated ORD: %d, IRD: %d\n",
+                                   cm_node->ord_size, cm_node->ird_size);
+                       break;
+               }
+               break;
+       case IETF_MPA_V1:
+       default:
+               break;
+       }
+
+       memcpy(cm_node->pdata_buf, buffer + mpa_hdr_len, priv_data_len);
+       cm_node->pdata.size = priv_data_len;
+
+       if (mpa_frame->flags & IETF_MPA_FLAGS_REJECT)
+               *type = I40IW_MPA_REQUEST_REJECT;
+
+       if (mpa_frame->flags & IETF_MPA_FLAGS_MARKERS)
+               cm_node->snd_mark_en = true;
+
+       return 0;
+}
+
+/**
+ * i40iw_schedule_cm_timer - schedule a timer for a cm node
+ * @cm_node: connection's node
+ * @sqbuf: buffer to send
+ * @type: whether it is a send or close timer
+ * @send_retrans: whether retransmits are to be done
+ * @close_when_complete: whether cm_node is to be removed when done
+ *
+ * note - cm_node needs to be protected before calling this. Encase in:
+ *             i40iw_rem_ref_cm_node(cm_core, cm_node);
+ *             i40iw_schedule_cm_timer(...)
+ *             atomic_inc(&cm_node->ref_count);
+ */
+int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
+                           struct i40iw_puda_buf *sqbuf,
+                           enum i40iw_timer_type type,
+                           int send_retrans,
+                           int close_when_complete)
+{
+       struct i40iw_sc_dev *dev = cm_node->dev;
+       struct i40iw_cm_core *cm_core = cm_node->cm_core;
+       struct i40iw_timer_entry *new_send;
+       int ret = 0;
+       u32 was_timer_set;
+       unsigned long flags;
+
+       new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC);
+       if (!new_send) {
+               i40iw_free_sqbuf(cm_node->dev, (void *)sqbuf);
+               return -ENOMEM;
+       }
+       new_send->retrycount = I40IW_DEFAULT_RETRYS;
+       new_send->retranscount = I40IW_DEFAULT_RETRANS;
+       new_send->sqbuf = sqbuf;
+       new_send->timetosend = jiffies;
+       new_send->type = type;
+       new_send->send_retrans = send_retrans;
+       new_send->close_when_complete = close_when_complete;
+
+       if (type == I40IW_TIMER_TYPE_CLOSE) {
+               new_send->timetosend += (HZ / 10);
+               if (cm_node->close_entry) {
+                       kfree(new_send);
+                       i40iw_free_sqbuf(cm_node->dev, (void *)sqbuf);
+                       i40iw_pr_err("already close entry\n");
+                       return -EINVAL;
+               }
+               cm_node->close_entry = new_send;
+       }
+
+       if (type == I40IW_TIMER_TYPE_SEND) {
+               spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+               cm_node->send_entry = new_send;
+               atomic_inc(&cm_node->ref_count);
+               spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+               new_send->timetosend = jiffies + I40IW_RETRY_TIMEOUT;
+
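+               /*
+                * take an extra reference so the buffer remains available
+                * for the retransmit path after it is queued to the ILQ
+                */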
+               atomic_inc(&sqbuf->refcount);
+               i40iw_puda_send_buf(dev->ilq, sqbuf);
+               if (!send_retrans) {
+                       i40iw_cleanup_retrans_entry(cm_node);
+                       if (close_when_complete)
+                               i40iw_rem_ref_cm_node(cm_node);
+                       return ret;
+               }
+       }
+
+       spin_lock_irqsave(&cm_core->ht_lock, flags);
+       was_timer_set = timer_pending(&cm_core->tcp_timer);
+
+       if (!was_timer_set) {
+               cm_core->tcp_timer.expires = new_send->timetosend;
+               add_timer(&cm_core->tcp_timer);
+       }
+       spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+
+       return ret;
+}
+
+/**
+ * i40iw_retrans_expired - handle a node whose retransmit attempts are exhausted
+ * @cm_node: connection's node
+ */
+static void i40iw_retrans_expired(struct i40iw_cm_node *cm_node)
+{
+       struct iw_cm_id *cm_id = cm_node->cm_id;
+       enum i40iw_cm_node_state state = cm_node->state;
+
+       cm_node->state = I40IW_CM_STATE_CLOSED;
+       switch (state) {
+       case I40IW_CM_STATE_SYN_RCVD:
+       case I40IW_CM_STATE_CLOSING:
+               i40iw_rem_ref_cm_node(cm_node);
+               break;
+       case I40IW_CM_STATE_FIN_WAIT1:
+       case I40IW_CM_STATE_LAST_ACK:
+               if (cm_node->cm_id)
+                       cm_id->rem_ref(cm_id);
+               i40iw_send_reset(cm_node);
+               break;
+       default:
+               atomic_inc(&cm_node->ref_count);
+               i40iw_send_reset(cm_node);
+               i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED);
+               break;
+       }
+}
+
+/**
+ * i40iw_handle_close_entry - for handling retry/timeouts
+ * @cm_node: connection's node
+ * @rem_node: flag to remove cm_node
+ */
+static void i40iw_handle_close_entry(struct i40iw_cm_node *cm_node, u32 rem_node)
+{
+       struct i40iw_timer_entry *close_entry = cm_node->close_entry;
+       struct iw_cm_id *cm_id = cm_node->cm_id;
+       struct i40iw_qp *iwqp;
+       unsigned long flags;
+
+       if (!close_entry)
+               return;
+       iwqp = (struct i40iw_qp *)close_entry->sqbuf;
+       if (iwqp) {
+               spin_lock_irqsave(&iwqp->lock, flags);
+               if (iwqp->cm_id) {
+                       iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED;
+                       iwqp->hw_iwarp_state = I40IW_QP_STATE_ERROR;
+                       iwqp->last_aeq = I40IW_AE_RESET_SENT;
+                       iwqp->ibqp_state = IB_QPS_ERR;
+                       spin_unlock_irqrestore(&iwqp->lock, flags);
+                       i40iw_cm_disconn(iwqp);
+               } else {
+                       spin_unlock_irqrestore(&iwqp->lock, flags);
+               }
+       } else if (rem_node) {
+               /* TIME_WAIT state */
+               i40iw_rem_ref_cm_node(cm_node);
+       }
+       if (cm_id)
+               cm_id->rem_ref(cm_id);
+       kfree(close_entry);
+       cm_node->close_entry = NULL;
+}
+
+/**
+ * i40iw_cm_timer_tick - system's timer expired callback
+ * @pass: pointer to the cm_core
+ */
+static void i40iw_cm_timer_tick(unsigned long pass)
+{
+       unsigned long nexttimeout = jiffies + I40IW_LONG_TIME;
+       struct i40iw_cm_node *cm_node;
+       struct i40iw_timer_entry *send_entry, *close_entry;
+       struct list_head *list_core_temp;
+       struct list_head *list_node;
+       struct i40iw_cm_core *cm_core = (struct i40iw_cm_core *)pass;
+       u32 settimer = 0;
+       unsigned long timetosend;
+       struct i40iw_sc_dev *dev;
+       unsigned long flags;
+
+       struct list_head timer_list;
+
+       INIT_LIST_HEAD(&timer_list);
+       spin_lock_irqsave(&cm_core->ht_lock, flags);
+
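+       /*
+        * collect the nodes that have pending close or send timers under
+        * ht_lock, taking a reference on each so they can be processed
+        * after the lock is dropped
+        */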
+       list_for_each_safe(list_node, list_core_temp, &cm_core->connected_nodes) {
+               cm_node = container_of(list_node, struct i40iw_cm_node, list);
+               if (cm_node->close_entry || cm_node->send_entry) {
+                       atomic_inc(&cm_node->ref_count);
+                       list_add(&cm_node->timer_entry, &timer_list);
+               }
+       }
+       spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+
+       list_for_each_safe(list_node, list_core_temp, &timer_list) {
+               cm_node = container_of(list_node,
+                                      struct i40iw_cm_node,
+                                      timer_entry);
+               close_entry = cm_node->close_entry;
+
+               if (close_entry) {
+                       if (time_after(close_entry->timetosend, jiffies)) {
+                               if (nexttimeout > close_entry->timetosend ||
+                                   !settimer) {
+                                       nexttimeout = close_entry->timetosend;
+                                       settimer = 1;
+                               }
+                       } else {
+                               i40iw_handle_close_entry(cm_node, 1);
+                       }
+               }
+
+               spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+
+               send_entry = cm_node->send_entry;
+               if (!send_entry)
+                       goto done;
+               if (time_after(send_entry->timetosend, jiffies)) {
+                       if (cm_node->state != I40IW_CM_STATE_OFFLOADED) {
+                               if ((nexttimeout > send_entry->timetosend) ||
+                                   !settimer) {
+                                       nexttimeout = send_entry->timetosend;
+                                       settimer = 1;
+                               }
+                       } else {
+                               i40iw_free_retrans_entry(cm_node);
+                       }
+                       goto done;
+               }
+
+               if ((cm_node->state == I40IW_CM_STATE_OFFLOADED) ||
+                   (cm_node->state == I40IW_CM_STATE_CLOSED)) {
+                       i40iw_free_retrans_entry(cm_node);
+                       goto done;
+               }
+
+               if (!send_entry->retranscount || !send_entry->retrycount) {
+                       i40iw_free_retrans_entry(cm_node);
+
+                       spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+                       i40iw_retrans_expired(cm_node);
+                       cm_node->state = I40IW_CM_STATE_CLOSED;
+                       spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+                       goto done;
+               }
+               cm_node->cm_core->stats_pkt_retrans++;
+               spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+
+               dev = cm_node->dev;
+               atomic_inc(&send_entry->sqbuf->refcount);
+               i40iw_puda_send_buf(dev->ilq, send_entry->sqbuf);
+               spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
+               if (send_entry->send_retrans) {
+                       send_entry->retranscount--;
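+                       /*
+                        * exponential backoff: the timeout doubles with each
+                        * retransmission and is capped at I40IW_MAX_TIMEOUT
+                        */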
+                       timetosend = (I40IW_RETRY_TIMEOUT <<
+                                     (I40IW_DEFAULT_RETRANS -
+                                      send_entry->retranscount));
+
+                       send_entry->timetosend = jiffies +
+                           min(timetosend, I40IW_MAX_TIMEOUT);
+                       if (nexttimeout > send_entry->timetosend || !settimer) {
+                               nexttimeout = send_entry->timetosend;
+                               settimer = 1;
+                       }
+               } else {
+                       int close_when_complete;
+
+                       close_when_complete = send_entry->close_when_complete;
+                       i40iw_debug(cm_node->dev,
+                                   I40IW_DEBUG_CM,
+                                   "cm_node=%p state=%d\n",
+                                   cm_node,
+                                   cm_node->state);
+                       i40iw_free_retrans_entry(cm_node);
+                       if (close_when_complete)
+                               i40iw_rem_ref_cm_node(cm_node);
+               }
+done:
+               spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+               i40iw_rem_ref_cm_node(cm_node);
+       }
+
+       if (settimer) {
+               spin_lock_irqsave(&cm_core->ht_lock, flags);
+               if (!timer_pending(&cm_core->tcp_timer)) {
+                       cm_core->tcp_timer.expires = nexttimeout;
+                       add_timer(&cm_core->tcp_timer);
+               }
+               spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+       }
+}
+
+/**
+ * i40iw_send_syn - send SYN packet
+ * @cm_node: connection's node
+ * @sendack: flag to set ACK bit or not
+ */
+int i40iw_send_syn(struct i40iw_cm_node *cm_node, u32 sendack)
+{
+       struct i40iw_puda_buf *sqbuf;
+       int flags = SET_SYN;
+       char optionsbuffer[sizeof(struct option_mss) +
+                          sizeof(struct option_windowscale) +
+                          sizeof(struct option_base) + TCP_OPTIONS_PADDING];
+       struct i40iw_kmem_info opts;
+
+       int optionssize = 0;
+       /* Sending MSS option */
+       union all_known_options *options;
+
+       opts.addr = optionsbuffer;
+       if (!cm_node) {
+               i40iw_pr_err("no cm_node\n");
+               return -EINVAL;
+       }
+
+       options = (union all_known_options *)&optionsbuffer[optionssize];
+       options->as_mss.optionnum = OPTION_NUMBER_MSS;
+       options->as_mss.length = sizeof(struct option_mss);
+       options->as_mss.mss = htons(cm_node->tcp_cntxt.mss);
+       optionssize += sizeof(struct option_mss);
+
+       options = (union all_known_options *)&optionsbuffer[optionssize];
+       options->as_windowscale.optionnum = OPTION_NUMBER_WINDOW_SCALE;
+       options->as_windowscale.length = sizeof(struct option_windowscale);
+       options->as_windowscale.shiftcount = cm_node->tcp_cntxt.rcv_wscale;
+       optionssize += sizeof(struct option_windowscale);
+       options = (union all_known_options *)&optionsbuffer[optionssize];
+       options->as_end = OPTION_NUMBER_END;
+       optionssize += 1;
+
+       if (sendack)
+               flags |= SET_ACK;
+
+       opts.size = optionssize;
+
+       sqbuf = i40iw_form_cm_frame(cm_node, &opts, NULL, NULL, flags);
+       if (!sqbuf) {
+               i40iw_pr_err("no sqbuf\n");
+               return -1;
+       }
+       return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);
+}
+
+/**
+ * i40iw_send_ack - Send ACK packet
+ * @cm_node: connection's node
+ */
+static void i40iw_send_ack(struct i40iw_cm_node *cm_node)
+{
+       struct i40iw_puda_buf *sqbuf;
+
+       sqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, SET_ACK);
+       if (sqbuf)
+               i40iw_puda_send_buf(cm_node->dev->ilq, sqbuf);
+       else
+               i40iw_pr_err("no sqbuf\n");
+}
+
+/**
+ * i40iw_send_fin - Send FIN pkt
+ * @cm_node: connection's node
+ */
+static int i40iw_send_fin(struct i40iw_cm_node *cm_node)
+{
+       struct i40iw_puda_buf *sqbuf;
+
+       sqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, SET_ACK | SET_FIN);
+       if (!sqbuf) {
+               i40iw_pr_err("no sqbuf\n");
+               return -1;
+       }
+       return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);
+}
+
+/**
+ * i40iw_find_node - find a cm node that matches the reference cm node
+ * @cm_core: cm's core
+ * @rem_port: remote tcp port num
+ * @rem_addr: remote ip addr
+ * @loc_port: local tcp port num
+ * @loc_addr: local ip addr
+ * @add_refcnt: flag to increment refcount of cm_node
+ */
+struct i40iw_cm_node *i40iw_find_node(struct i40iw_cm_core *cm_core,
+                                     u16 rem_port,
+                                     u32 *rem_addr,
+                                     u16 loc_port,
+                                     u32 *loc_addr,
+                                     bool add_refcnt)
+{
+       struct list_head *hte;
+       struct i40iw_cm_node *cm_node;
+       unsigned long flags;
+
+       hte = &cm_core->connected_nodes;
+
+       /* walk the connected list and find a cm_node matching this addr/port 4-tuple */
+       spin_lock_irqsave(&cm_core->ht_lock, flags);
+       list_for_each_entry(cm_node, hte, list) {
+               if (!memcmp(cm_node->loc_addr, loc_addr, sizeof(cm_node->loc_addr)) &&
+                   (cm_node->loc_port == loc_port) &&
+                   !memcmp(cm_node->rem_addr, rem_addr, sizeof(cm_node->rem_addr)) &&
+                   (cm_node->rem_port == rem_port)) {
+                       if (add_refcnt)
+                               atomic_inc(&cm_node->ref_count);
+                       spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+                       return cm_node;
+               }
+       }
+       spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+
+       /* no owner node */
+       return NULL;
+}
+
+/**
+ * i40iw_find_listener - find a cm node listening on this addr-port pair
+ * @cm_core: cm's core
+ * @dst_addr: listener ip addr
+ * @dst_port: listener tcp port num
+ * @vlan_id: vlan id of the listener
+ * @listener_state: state to match with listen node's
+ */
+static struct i40iw_cm_listener *i40iw_find_listener(
+                                                    struct i40iw_cm_core *cm_core,
+                                                    u32 *dst_addr,
+                                                    u16 dst_port,
+                                                    u16 vlan_id,
+                                                    enum i40iw_cm_listener_state
+                                                    listener_state)
+{
+       struct i40iw_cm_listener *listen_node;
+       static const u32 ip_zero[4] = { 0, 0, 0, 0 };
+       u32 listen_addr[4];
+       u16 listen_port;
+       unsigned long flags;
+
+       /* walk the listen list and find a listener matching this addr/port pair */
+       spin_lock_irqsave(&cm_core->listen_list_lock, flags);
+       list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
+               memcpy(listen_addr, listen_node->loc_addr, sizeof(listen_addr));
+               listen_port = listen_node->loc_port;
+               /*
+                * compare node pair, return node handle if a match;
+                * a zeroed listen address acts as a wildcard
+                */
+               if ((!memcmp(listen_addr, dst_addr, sizeof(listen_addr)) ||
+                    !memcmp(listen_addr, ip_zero, sizeof(listen_addr))) &&
+                    (listen_port == dst_port) &&
+                    (listener_state & listen_node->listener_state)) {
+                       atomic_inc(&listen_node->ref_count);
+                       spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+                       return listen_node;
+               }
+       }
+       spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+       return NULL;
+}
+
+/**
+ * i40iw_add_hte_node - add a cm node to the hash table
+ * @cm_core: cm's core
+ * @cm_node: connection's node
+ */
+static void i40iw_add_hte_node(struct i40iw_cm_core *cm_core,
+                              struct i40iw_cm_node *cm_node)
+{
+       struct list_head *hte;
+       unsigned long flags;
+
+       if (!cm_node || !cm_core) {
+               i40iw_pr_err("cm_node or cm_core == NULL\n");
+               return;
+       }
+       spin_lock_irqsave(&cm_core->ht_lock, flags);
+
+       /* get a handle on the hash table element (list head for this slot) */
+       hte = &cm_core->connected_nodes;
+       list_add_tail(&cm_node->list, hte);
+       spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+}
+
+/**
+ * i40iw_listen_port_in_use - determine if port is in use
+ * @cm_core: cm's core
+ * @port: listen port number
+ */
+static bool i40iw_listen_port_in_use(struct i40iw_cm_core *cm_core, u16 port)
+{
+       struct i40iw_cm_listener *listen_node;
+       unsigned long flags;
+       bool ret = false;
+
+       spin_lock_irqsave(&cm_core->listen_list_lock, flags);
+       list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
+               if (listen_node->loc_port == port) {
+                       ret = true;
+                       break;
+               }
+       }
+       spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+       return ret;
+}
+
+/**
+ * i40iw_del_multiple_qhash - Remove qhash and child listens
+ * @iwdev: iWarp device
+ * @cm_info: CM info for parent listen node
+ * @cm_parent_listen_node: The parent listen node
+ */
+static enum i40iw_status_code i40iw_del_multiple_qhash(
+                                                      struct i40iw_device *iwdev,
+                                                      struct i40iw_cm_info *cm_info,
+                                                      struct i40iw_cm_listener *cm_parent_listen_node)
+{
+       struct i40iw_cm_listener *child_listen_node;
+       enum i40iw_status_code ret = I40IW_ERR_CONFIG;
+       struct list_head *pos, *tpos;
+       unsigned long flags;
+
+       spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
+       list_for_each_safe(pos, tpos, &cm_parent_listen_node->child_listen_list) {
+               child_listen_node = list_entry(pos, struct i40iw_cm_listener, child_listen_list);
+               if (child_listen_node->ipv4)
+                       i40iw_debug(&iwdev->sc_dev,
+                                   I40IW_DEBUG_CM,
+                                   "removing child listen for IP=%pI4, port=%d, vlan=%d\n",
+                                   child_listen_node->loc_addr,
+                                   child_listen_node->loc_port,
+                                   child_listen_node->vlan_id);
+               else
+                       i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
+                                   "removing child listen for IP=%pI6, port=%d, vlan=%d\n",
+                                   child_listen_node->loc_addr,
+                                   child_listen_node->loc_port,
+                                   child_listen_node->vlan_id);
+               list_del(pos);
+               memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
+                      sizeof(cm_info->loc_addr));
+               cm_info->vlan_id = child_listen_node->vlan_id;
+               ret = i40iw_manage_qhash(iwdev, cm_info,
+                                        I40IW_QHASH_TYPE_TCP_SYN,
+                                        I40IW_QHASH_MANAGE_TYPE_DELETE, NULL, false);
+               kfree(child_listen_node);
+               cm_parent_listen_node->cm_core->stats_listen_nodes_destroyed++;
+               i40iw_debug(&iwdev->sc_dev,
+                           I40IW_DEBUG_CM,
+                           "freed pointer = %p\n",
+                           child_listen_node);
+       }
+       spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
+
+       return ret;
+}
+
+/**
+ * i40iw_netdev_vlan_ipv6 - Gets the netdev and mac
+ * @addr: local IPv6 address
+ * @vlan_id: vlan id for the given IPv6 address
+ * @mac: mac address for the given IPv6 address
+ *
+ * Returns the net_device of the IPv6 address and also sets the
+ * vlan id and mac for that address.
+ */
+static struct net_device *i40iw_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id, u8 *mac)
+{
+       struct net_device *ip_dev = NULL;
+#if IS_ENABLED(CONFIG_IPV6)
+       struct in6_addr laddr6;
+
+       i40iw_copy_ip_htonl(laddr6.in6_u.u6_addr32, addr);
+       if (vlan_id)
+               *vlan_id = I40IW_NO_VLAN;
+       if (mac)
+               eth_zero_addr(mac);
+       rcu_read_lock();
+       for_each_netdev_rcu(&init_net, ip_dev) {
+               if (ipv6_chk_addr(&init_net, &laddr6, ip_dev, 1)) {
+                       if (vlan_id)
+                               *vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
+                       if (ip_dev->dev_addr && mac)
+                               ether_addr_copy(mac, ip_dev->dev_addr);
+                       break;
+               }
+       }
+       rcu_read_unlock();
+#endif
+       return ip_dev;
+}
+
+/**
+ * i40iw_get_vlan_ipv4 - Returns the vlan_id for IPv4 address
+ * @addr: local IPv4 address
+ */
+static u16 i40iw_get_vlan_ipv4(u32 *addr)
+{
+       struct net_device *netdev;
+       u16 vlan_id = I40IW_NO_VLAN;
+
+       netdev = ip_dev_find(&init_net, htonl(addr[0]));
+       if (netdev) {
+               vlan_id = rdma_vlan_dev_vlan_id(netdev);
+               dev_put(netdev);
+       }
+       return vlan_id;
+}
+
+/**
+ * i40iw_add_mqh_6 - Adds multiple qhashes for IPv6
+ * @iwdev: iWarp device
+ * @cm_info: CM info for parent listen node
+ * @cm_parent_listen_node: The parent listen node
+ *
+ * Adds a qhash and a child listen node for every IPv6 address
+ * on the adapter and adds the associated qhash filter
+ */
+static enum i40iw_status_code i40iw_add_mqh_6(struct i40iw_device *iwdev,
+                                             struct i40iw_cm_info *cm_info,
+                                             struct i40iw_cm_listener *cm_parent_listen_node)
+{
+       struct net_device *ip_dev;
+       struct inet6_dev *idev;
+       struct inet6_ifaddr *ifp;
+       enum i40iw_status_code ret = 0;
+       struct i40iw_cm_listener *child_listen_node;
+       unsigned long flags;
+
+       rtnl_lock();
+       for_each_netdev_rcu(&init_net, ip_dev) {
+               if ((((rdma_vlan_dev_vlan_id(ip_dev) < I40IW_NO_VLAN) &&
+                     (rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev)) ||
+                    (ip_dev == iwdev->netdev)) && (ip_dev->flags & IFF_UP)) {
+                       idev = __in6_dev_get(ip_dev);
+                       if (!idev) {
+                               i40iw_pr_err("idev == NULL\n");
+                               break;
+                       }
+                       list_for_each_entry(ifp, &idev->addr_list, if_list) {
+                               i40iw_debug(&iwdev->sc_dev,
+                                           I40IW_DEBUG_CM,
+                                           "IP=%pI6, vlan_id=%d, MAC=%pM\n",
+                                           &ifp->addr,
+                                           rdma_vlan_dev_vlan_id(ip_dev),
+                                           ip_dev->dev_addr);
+                               child_listen_node =
+                                       kzalloc(sizeof(*child_listen_node), GFP_ATOMIC);
+                               i40iw_debug(&iwdev->sc_dev,
+                                           I40IW_DEBUG_CM,
+                                           "Allocating child listener %p\n",
+                                           child_listen_node);
+                               if (!child_listen_node) {
+                                       i40iw_pr_err("listener memory allocation\n");
+                                       ret = I40IW_ERR_NO_MEMORY;
+                                       goto exit;
+                               }
+                               cm_info->vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
+                               cm_parent_listen_node->vlan_id = cm_info->vlan_id;
+
+                               memcpy(child_listen_node, cm_parent_listen_node,
+                                      sizeof(*child_listen_node));
+
+                               i40iw_copy_ip_ntohl(child_listen_node->loc_addr,
+                                                   ifp->addr.in6_u.u6_addr32);
+                               memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
+                                      sizeof(cm_info->loc_addr));
+
+                               ret = i40iw_manage_qhash(iwdev, cm_info,
+                                                        I40IW_QHASH_TYPE_TCP_SYN,
+                                                        I40IW_QHASH_MANAGE_TYPE_ADD,
+                                                        NULL, true);
+                               if (!ret) {
+                                       spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
+                                       list_add(&child_listen_node->child_listen_list,
+                                                &cm_parent_listen_node->child_listen_list);
+                                       spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
+                                       cm_parent_listen_node->cm_core->stats_listen_nodes_created++;
+                               } else {
+                                       kfree(child_listen_node);
+                               }
+                       }
+               }
+       }
+exit:
+       rtnl_unlock();
+       return ret;
+}
+
+/**
+ * i40iw_add_mqh_4 - Adds multiple qhashes for IPv4
+ * @iwdev: iWarp device
+ * @cm_info: CM info for parent listen node
+ * @cm_parent_listen_node: The parent listen node
+ *
+ * Adds a qhash and a child listen node for every IPv4 address
+ * on the adapter and adds the associated qhash filter
+ */
+static enum i40iw_status_code i40iw_add_mqh_4(
+                               struct i40iw_device *iwdev,
+                               struct i40iw_cm_info *cm_info,
+                               struct i40iw_cm_listener *cm_parent_listen_node)
+{
+       struct net_device *dev;
+       struct in_device *idev;
+       struct i40iw_cm_listener *child_listen_node;
+       enum i40iw_status_code ret = 0;
+       unsigned long flags;
+
+       rtnl_lock();
+       for_each_netdev(&init_net, dev) {
+               if ((((rdma_vlan_dev_vlan_id(dev) < I40IW_NO_VLAN) &&
+                     (rdma_vlan_dev_real_dev(dev) == iwdev->netdev)) ||
+                   (dev == iwdev->netdev)) && (dev->flags & IFF_UP)) {
+                       idev = in_dev_get(dev);
+                       for_ifa(idev) {
+                               i40iw_debug(&iwdev->sc_dev,
+                                           I40IW_DEBUG_CM,
+                                           "Allocating child CM Listener for IP=%pI4, vlan_id=%d, MAC=%pM\n",
+                                           &ifa->ifa_address,
+                                           rdma_vlan_dev_vlan_id(dev),
+                                           dev->dev_addr);
+                               child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_ATOMIC);
+                               cm_parent_listen_node->cm_core->stats_listen_nodes_created++;
+                               i40iw_debug(&iwdev->sc_dev,
+                                           I40IW_DEBUG_CM,
+                                           "Allocating child listener %p\n",
+                                           child_listen_node);
+                               if (!child_listen_node) {
+                                       i40iw_pr_err("listener memory allocation\n");
+                                       in_dev_put(idev);
+                                       ret = I40IW_ERR_NO_MEMORY;
+                                       goto exit;
+                               }
+                               cm_info->vlan_id = rdma_vlan_dev_vlan_id(dev);
+                               cm_parent_listen_node->vlan_id = cm_info->vlan_id;
+                               memcpy(child_listen_node,
+                                      cm_parent_listen_node,
+                                      sizeof(*child_listen_node));
+
+                               child_listen_node->loc_addr[0] = ntohl(ifa->ifa_address);
+                               memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
+                                      sizeof(cm_info->loc_addr));
+
+                               ret = i40iw_manage_qhash(iwdev,
+                                                        cm_info,
+                                                        I40IW_QHASH_TYPE_TCP_SYN,
+                                                        I40IW_QHASH_MANAGE_TYPE_ADD,
+                                                        NULL,
+                                                        true);
+                               if (!ret) {
+                                       spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
+                                       list_add(&child_listen_node->child_listen_list,
+                                                &cm_parent_listen_node->child_listen_list);
+                                       spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
+                               } else {
+                                       kfree(child_listen_node);
+                                       cm_parent_listen_node->cm_core->stats_listen_nodes_created--;
+                               }
+                       }
+                       endfor_ifa(idev);
+                       in_dev_put(idev);
+               }
+       }
+exit:
+       rtnl_unlock();
+       return ret;
+}
+
+/**
+ * i40iw_dec_refcnt_listen - delete listener and associated cm nodes
+ * @cm_core: cm's core
+ * @listener: passive connection's listener
+ * @free_hanging_nodes: to free associated cm_nodes
+ * @apbvt_del: flag to delete the apbvt
+ */
+static int i40iw_dec_refcnt_listen(struct i40iw_cm_core *cm_core,
+                                  struct i40iw_cm_listener *listener,
+                                  int free_hanging_nodes, bool apbvt_del)
+{
+       int ret = -EINVAL;
+       int err = 0;
+       struct list_head *list_pos;
+       struct list_head *list_temp;
+       struct i40iw_cm_node *cm_node;
+       struct list_head reset_list;
+       struct i40iw_cm_info nfo;
+       struct i40iw_cm_node *loopback;
+       enum i40iw_cm_node_state old_state;
+       unsigned long flags;
+
+       /* free non-accelerated child nodes for this listener */
+       INIT_LIST_HEAD(&reset_list);
+       if (free_hanging_nodes) {
+               spin_lock_irqsave(&cm_core->ht_lock, flags);
+               list_for_each_safe(list_pos, list_temp, &cm_core->connected_nodes) {
+                       cm_node = container_of(list_pos, struct i40iw_cm_node, list);
+                       if ((cm_node->listener == listener) && !cm_node->accelerated) {
+                               atomic_inc(&cm_node->ref_count);
+                               list_add(&cm_node->reset_entry, &reset_list);
+                       }
+               }
+               spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+       }
+
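+       /*
+        * tear down each collected node: drop it if already closing,
+        * send a reset, or for a loopback pair report a connect error
+        * to the partner node
+        */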
+       list_for_each_safe(list_pos, list_temp, &reset_list) {
+               cm_node = container_of(list_pos, struct i40iw_cm_node, reset_entry);
+               loopback = cm_node->loopbackpartner;
+               if (cm_node->state >= I40IW_CM_STATE_FIN_WAIT1) {
+                       i40iw_rem_ref_cm_node(cm_node);
+               } else {
+                       if (!loopback) {
+                               i40iw_cleanup_retrans_entry(cm_node);
+                               err = i40iw_send_reset(cm_node);
+                               if (err) {
+                                       cm_node->state = I40IW_CM_STATE_CLOSED;
+                                       i40iw_pr_err("send reset\n");
+                               } else {
+                                       old_state = cm_node->state;
+                                       cm_node->state = I40IW_CM_STATE_LISTENER_DESTROYED;
+                                       if (old_state != I40IW_CM_STATE_MPAREQ_RCVD)
+                                               i40iw_rem_ref_cm_node(cm_node);
+                               }
+                       } else {
+                               struct i40iw_cm_event event;
+
+                               event.cm_node = loopback;
+                               memcpy(event.cm_info.rem_addr,
+                                      loopback->rem_addr, sizeof(event.cm_info.rem_addr));
+                               memcpy(event.cm_info.loc_addr,
+                                      loopback->loc_addr, sizeof(event.cm_info.loc_addr));
+                               event.cm_info.rem_port = loopback->rem_port;
+                               event.cm_info.loc_port = loopback->loc_port;
+                               event.cm_info.cm_id = loopback->cm_id;
+                               event.cm_info.ipv4 = loopback->ipv4;
+                               atomic_inc(&loopback->ref_count);
+                               loopback->state = I40IW_CM_STATE_CLOSED;
+                               i40iw_event_connect_error(&event);
+                               cm_node->state = I40IW_CM_STATE_LISTENER_DESTROYED;
+                               i40iw_rem_ref_cm_node(cm_node);
+                       }
+               }
+       }
+
+       if (!atomic_dec_return(&listener->ref_count)) {
+               spin_lock_irqsave(&cm_core->listen_list_lock, flags);
+               list_del(&listener->list);
+               spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+
+               if (listener->iwdev) {
+                       if (apbvt_del && !i40iw_listen_port_in_use(cm_core, listener->loc_port))
+                               i40iw_manage_apbvt(listener->iwdev,
+                                                  listener->loc_port,
+                                                  I40IW_MANAGE_APBVT_DEL);
+
+                       memcpy(nfo.loc_addr, listener->loc_addr, sizeof(nfo.loc_addr));
+                       nfo.loc_port = listener->loc_port;
+                       nfo.ipv4 = listener->ipv4;
+                       nfo.vlan_id = listener->vlan_id;
+
+                       if (!list_empty(&listener->child_listen_list)) {
+                               i40iw_del_multiple_qhash(listener->iwdev, &nfo, listener);
+                       } else {
+                               if (listener->qhash_set)
+                                       i40iw_manage_qhash(listener->iwdev,
+                                                          &nfo,
+                                                          I40IW_QHASH_TYPE_TCP_SYN,
+                                                          I40IW_QHASH_MANAGE_TYPE_DELETE,
+                                                          NULL,
+                                                          false);
+                       }
+               }
+
+               cm_core->stats_listen_destroyed++;
+               kfree(listener);
+               cm_core->stats_listen_nodes_destroyed++;
+               listener = NULL;
+               ret = 0;
+       }
+
+       if (listener) {
+               if (atomic_read(&listener->pend_accepts_cnt) > 0)
+                       i40iw_debug(cm_core->dev,
+                                   I40IW_DEBUG_CM,
+                                   "%s: listener (%p) pending accepts=%u\n",
+                                   __func__,
+                                   listener,
+                                   atomic_read(&listener->pend_accepts_cnt));
+       }
+
+       return ret;
+}
+
+/**
+ * i40iw_cm_del_listen - delete a listener
+ * @cm_core: cm's core
+ * @listener: passive connection's listener
+ * @apbvt_del: flag to delete apbvt
+ */
+static int i40iw_cm_del_listen(struct i40iw_cm_core *cm_core,
+                              struct i40iw_cm_listener *listener,
+                              bool apbvt_del)
+{
+       listener->listener_state = I40IW_CM_LISTENER_PASSIVE_STATE;
+       listener->cm_id = NULL; /* going to be destroyed pretty soon */
+       return i40iw_dec_refcnt_listen(cm_core, listener, 1, apbvt_del);
+}
+
+/**
+ * i40iw_addr_resolve_neigh - resolve neighbor address
+ * @iwdev: iwarp device structure
+ * @src_ip: local ip address
+ * @dst_ip: remote ip address
+ * @arpindex: if there is an arp entry
+ */
+static int i40iw_addr_resolve_neigh(struct i40iw_device *iwdev,
+                                   u32 src_ip,
+                                   u32 dst_ip,
+                                   int arpindex)
+{
+       struct rtable *rt;
+       struct neighbour *neigh;
+       int rc = arpindex;
+       struct net_device *netdev = iwdev->netdev;
+       __be32 dst_ipaddr = htonl(dst_ip);
+       __be32 src_ipaddr = htonl(src_ip);
+
+       rt = ip_route_output(&init_net, dst_ipaddr, src_ipaddr, 0, 0);
+       if (IS_ERR(rt)) {
+               i40iw_pr_err("ip_route_output\n");
+               return rc;
+       }
+
+       if (netif_is_bond_slave(netdev))
+               netdev = netdev_master_upper_dev_get(netdev);
+
+       neigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr);
+
+       rcu_read_lock();
+       if (neigh) {
+               if (neigh->nud_state & NUD_VALID) {
+                       if (arpindex >= 0) {
+                               if (ether_addr_equal(iwdev->arp_table[arpindex].mac_addr,
+                                                    neigh->ha))
+                                       /* Mac address same as arp table */
+                                       goto resolve_neigh_exit;
+                               i40iw_manage_arp_cache(iwdev,
+                                                      iwdev->arp_table[arpindex].mac_addr,
+                                                      &dst_ip,
+                                                      true,
+                                                      I40IW_ARP_DELETE);
+                       }
+
+                       i40iw_manage_arp_cache(iwdev, neigh->ha, &dst_ip, true, I40IW_ARP_ADD);
+                       rc = i40iw_arp_table(iwdev, &dst_ip, true, NULL, I40IW_ARP_RESOLVE);
+               } else {
+                       neigh_event_send(neigh, NULL);
+               }
+       }
+ resolve_neigh_exit:
+
+       rcu_read_unlock();
+       if (neigh)
+               neigh_release(neigh);
+
+       ip_rt_put(rt);
+       return rc;
+}
+
+/**
+ * i40iw_get_dst_ipv6 - get a route for the given IPv6 source/destination pair
+ * @src_addr: local IPv6 address
+ * @dst_addr: remote IPv6 address
+ */
+#if IS_ENABLED(CONFIG_IPV6)
+static struct dst_entry *i40iw_get_dst_ipv6(struct sockaddr_in6 *src_addr,
+                                           struct sockaddr_in6 *dst_addr)
+{
+       struct dst_entry *dst;
+       struct flowi6 fl6;
+
+       memset(&fl6, 0, sizeof(fl6));
+       fl6.daddr = dst_addr->sin6_addr;
+       fl6.saddr = src_addr->sin6_addr;
+       if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
+               fl6.flowi6_oif = dst_addr->sin6_scope_id;
+
+       dst = ip6_route_output(&init_net, NULL, &fl6);
+       return dst;
+}
+#endif
+
+/**
+ * i40iw_addr_resolve_neigh_ipv6 - resolve neighbor ipv6 address
+ * @iwdev: iwarp device structure
+ * @src: local ipv6 address
+ * @dest: remote ipv6 address
+ * @arpindex: if there is an arp entry
+ */
+#if IS_ENABLED(CONFIG_IPV6)
+static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
+                                        u32 *src,
+                                        u32 *dest,
+                                        int arpindex)
+{
+       struct neighbour *neigh;
+       int rc = arpindex;
+       struct net_device *netdev = iwdev->netdev;
+       struct dst_entry *dst;
+       struct sockaddr_in6 dst_addr;
+       struct sockaddr_in6 src_addr;
+
+       memset(&dst_addr, 0, sizeof(dst_addr));
+       dst_addr.sin6_family = AF_INET6;
+       i40iw_copy_ip_htonl(dst_addr.sin6_addr.in6_u.u6_addr32, dest);
+       memset(&src_addr, 0, sizeof(src_addr));
+       src_addr.sin6_family = AF_INET6;
+       i40iw_copy_ip_htonl(src_addr.sin6_addr.in6_u.u6_addr32, src);
+       dst = i40iw_get_dst_ipv6(&src_addr, &dst_addr);
+       if (!dst || dst->error) {
+               if (dst) {
+                       dst_release(dst);
+                       i40iw_pr_err("ip6_route_output returned dst->error = %d\n",
+                                    dst->error);
+               }
+               return rc;
+       }
+
+       if (netif_is_bond_slave(netdev))
+               netdev = netdev_master_upper_dev_get(netdev);
+
+       neigh = dst_neigh_lookup(dst, &dst_addr);
+
+       rcu_read_lock();
+       if (neigh) {
+               i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM, "dst_neigh_lookup MAC=%pM\n", neigh->ha);
+               if (neigh->nud_state & NUD_VALID) {
+                       if (arpindex >= 0) {
+                               if (ether_addr_equal
+                                   (iwdev->arp_table[arpindex].mac_addr,
+                                    neigh->ha)) {
+                                       /* Mac address same as in arp table */
+                                       goto resolve_neigh_exit6;
+                               }
+                               i40iw_manage_arp_cache(iwdev,
+                                                      iwdev->arp_table[arpindex].mac_addr,
+                                                      dest,
+                                                      false,
+                                                      I40IW_ARP_DELETE);
+                       }
+                       i40iw_manage_arp_cache(iwdev,
+                                              neigh->ha,
+                                              dest,
+                                              false,
+                                              I40IW_ARP_ADD);
+                       rc = i40iw_arp_table(iwdev,
+                                            dest,
+                                            false,
+                                            NULL,
+                                            I40IW_ARP_RESOLVE);
+               } else {
+                       neigh_event_send(neigh, NULL);
+               }
+       }
+
+ resolve_neigh_exit6:
+       rcu_read_unlock();
+       if (neigh)
+               neigh_release(neigh);
+       dst_release(dst);
+       return rc;
+}
+#endif
+
+/**
+ * i40iw_ipv4_is_loopback - check if loopback
+ * @loc_addr: local addr to compare
+ * @rem_addr: remote address
+ */
+static bool i40iw_ipv4_is_loopback(u32 loc_addr, u32 rem_addr)
+{
+       return ipv4_is_loopback(htonl(rem_addr)) || (loc_addr == rem_addr);
+}
+
+/**
+ * i40iw_ipv6_is_loopback - check if loopback
+ * @loc_addr: local addr to compare
+ * @rem_addr: remote address
+ */
+static bool i40iw_ipv6_is_loopback(u32 *loc_addr, u32 *rem_addr)
+{
+       struct in6_addr raddr6;
+
+       i40iw_copy_ip_htonl(raddr6.in6_u.u6_addr32, rem_addr);
+       return (!memcmp(loc_addr, rem_addr, 16) || ipv6_addr_loopback(&raddr6));
+}
+
+/**
+ * i40iw_make_cm_node - create a new instance of a cm node
+ * @cm_core: cm's core
+ * @iwdev: iwarp device structure
+ * @cm_info: quad info for connection
+ * @listener: passive connection's listener
+ */
+static struct i40iw_cm_node *i40iw_make_cm_node(
+                                  struct i40iw_cm_core *cm_core,
+                                  struct i40iw_device *iwdev,
+                                  struct i40iw_cm_info *cm_info,
+                                  struct i40iw_cm_listener *listener)
+{
+       struct i40iw_cm_node *cm_node;
+       struct timespec ts;
+       int oldarpindex;
+       int arpindex;
+       struct net_device *netdev = iwdev->netdev;
+
+       /* create an hte and cm_node for this instance */
+       cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
+       if (!cm_node)
+               return NULL;
+
+       /* set our node specific transport info */
+       cm_node->ipv4 = cm_info->ipv4;
+       cm_node->vlan_id = cm_info->vlan_id;
+       memcpy(cm_node->loc_addr, cm_info->loc_addr, sizeof(cm_node->loc_addr));
+       memcpy(cm_node->rem_addr, cm_info->rem_addr, sizeof(cm_node->rem_addr));
+       cm_node->loc_port = cm_info->loc_port;
+       cm_node->rem_port = cm_info->rem_port;
+
+       cm_node->mpa_frame_rev = iwdev->mpa_version;
+       cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
+       cm_node->ird_size = I40IW_MAX_IRD_SIZE;
+       cm_node->ord_size = I40IW_MAX_ORD_SIZE;
+
+       cm_node->listener = listener;
+       cm_node->cm_id = cm_info->cm_id;
+       ether_addr_copy(cm_node->loc_mac, netdev->dev_addr);
+       spin_lock_init(&cm_node->retrans_list_lock);
+
+       atomic_set(&cm_node->ref_count, 1);
+       /* associate our parent CM core */
+       cm_node->cm_core = cm_core;
+       cm_node->tcp_cntxt.loc_id = I40IW_CM_DEF_LOCAL_ID;
+       cm_node->tcp_cntxt.rcv_wscale = I40IW_CM_DEFAULT_RCV_WND_SCALE;
+       cm_node->tcp_cntxt.rcv_wnd =
+                       I40IW_CM_DEFAULT_RCV_WND_SCALED >> I40IW_CM_DEFAULT_RCV_WND_SCALE;
+       ts = current_kernel_time();
+       cm_node->tcp_cntxt.loc_seq_num = htonl(ts.tv_nsec);
+       cm_node->tcp_cntxt.mss = iwdev->mss;
+
+       cm_node->iwdev = iwdev;
+       cm_node->dev = &iwdev->sc_dev;
+
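+       /*
+        * for a loopback destination use the existing ARP entry; otherwise
+        * resolve the neighbor and refresh the ARP cache before connecting
+        */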
+       if ((cm_node->ipv4 &&
+            i40iw_ipv4_is_loopback(cm_node->loc_addr[0], cm_node->rem_addr[0])) ||
+            (!cm_node->ipv4 && i40iw_ipv6_is_loopback(cm_node->loc_addr,
+                                                      cm_node->rem_addr))) {
+               arpindex = i40iw_arp_table(iwdev,
+                                          cm_node->rem_addr,
+                                          false,
+                                          NULL,
+                                          I40IW_ARP_RESOLVE);
+       } else {
+               oldarpindex = i40iw_arp_table(iwdev,
+                                             cm_node->rem_addr,
+                                             false,
+                                             NULL,
+                                             I40IW_ARP_RESOLVE);
+               if (cm_node->ipv4)
+                       arpindex = i40iw_addr_resolve_neigh(iwdev,
+                                                           cm_info->loc_addr[0],
+                                                           cm_info->rem_addr[0],
+                                                           oldarpindex);
+#if IS_ENABLED(CONFIG_IPV6)
+               else
+                       arpindex = i40iw_addr_resolve_neigh_ipv6(iwdev,
+                                                                cm_info->loc_addr,
+                                                                cm_info->rem_addr,
+                                                                oldarpindex);
+#endif
+       }
+       if (arpindex < 0) {
+               i40iw_pr_err("cm_node arpindex\n");
+               kfree(cm_node);
+               return NULL;
+       }
+       ether_addr_copy(cm_node->rem_mac, iwdev->arp_table[arpindex].mac_addr);
+       i40iw_add_hte_node(cm_core, cm_node);
+       cm_core->stats_nodes_created++;
+       return cm_node;
+}
+
+/**
+ * i40iw_rem_ref_cm_node - destroy an instance of a cm node
+ * @cm_node: connection's node
+ */
+static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *cm_node)
+{
+       struct i40iw_cm_core *cm_core = cm_node->cm_core;
+       struct i40iw_qp *iwqp;
+       struct i40iw_cm_info nfo;
+       unsigned long flags;
+
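+       /* drop a reference; only the final release tears the node down */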
+       spin_lock_irqsave(&cm_node->cm_core->ht_lock, flags);
+       if (atomic_dec_return(&cm_node->ref_count)) {
+               spin_unlock_irqrestore(&cm_node->cm_core->ht_lock, flags);
+               return;
+       }
+       list_del(&cm_node->list);
+       spin_unlock_irqrestore(&cm_node->cm_core->ht_lock, flags);
+
+       /* if the node is destroyed before the connection was accelerated */
+       if (!cm_node->accelerated && cm_node->accept_pend) {
+               pr_err("node destroyed before established\n");
+               atomic_dec(&cm_node->listener->pend_accepts_cnt);
+       }
+       if (cm_node->close_entry)
+               i40iw_handle_close_entry(cm_node, 0);
+       if (cm_node->listener) {
+               i40iw_dec_refcnt_listen(cm_core, cm_node->listener, 0, true);
+       } else {
+               if (!i40iw_listen_port_in_use(cm_core, htons(cm_node->loc_port)) &&
+                   cm_node->apbvt_set && cm_node->iwdev) {
+                       i40iw_manage_apbvt(cm_node->iwdev,
+                                          cm_node->loc_port,
+                                          I40IW_MANAGE_APBVT_DEL);
+                       i40iw_get_addr_info(cm_node, &nfo);
+                       if (cm_node->qhash_set) {
+                               i40iw_manage_qhash(cm_node->iwdev,
+                                                  &nfo,
+                                                  I40IW_QHASH_TYPE_TCP_ESTABLISHED,
+                                                  I40IW_QHASH_MANAGE_TYPE_DELETE,
+                                                  NULL,
+                                                  false);
+                               cm_node->qhash_set = 0;
+                       }
+               }
+       }
+
+       iwqp = cm_node->iwqp;
+       if (iwqp) {
+               iwqp->cm_node = NULL;
+               i40iw_rem_ref(&iwqp->ibqp);
+               cm_node->iwqp = NULL;
+       } else if (cm_node->qhash_set) {
+               i40iw_get_addr_info(cm_node, &nfo);
+               i40iw_manage_qhash(cm_node->iwdev,
+                                  &nfo,
+                                  I40IW_QHASH_TYPE_TCP_ESTABLISHED,
+                                  I40IW_QHASH_MANAGE_TYPE_DELETE,
+                                  NULL,
+                                  false);
+               cm_node->qhash_set = 0;
+       }
+
+       cm_node->cm_core->stats_nodes_destroyed++;
+       kfree(cm_node);
+}
+
+/**
+ * i40iw_handle_fin_pkt - FIN packet received
+ * @cm_node: connection's node
+ */
+static void i40iw_handle_fin_pkt(struct i40iw_cm_node *cm_node)
+{
+       u32 ret;
+
+       switch (cm_node->state) {
+       case I40IW_CM_STATE_SYN_RCVD:
+       case I40IW_CM_STATE_SYN_SENT:
+       case I40IW_CM_STATE_ESTABLISHED:
+       case I40IW_CM_STATE_MPAREJ_RCVD:
+               cm_node->tcp_cntxt.rcv_nxt++;
+               i40iw_cleanup_retrans_entry(cm_node);
+               cm_node->state = I40IW_CM_STATE_LAST_ACK;
+               i40iw_send_fin(cm_node);
+               break;
+       case I40IW_CM_STATE_MPAREQ_SENT:
+               i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED);
+               cm_node->tcp_cntxt.rcv_nxt++;
+               i40iw_cleanup_retrans_entry(cm_node);
+               cm_node->state = I40IW_CM_STATE_CLOSED;
+               atomic_inc(&cm_node->ref_count);
+               i40iw_send_reset(cm_node);
+               break;
+       case I40IW_CM_STATE_FIN_WAIT1:
+               cm_node->tcp_cntxt.rcv_nxt++;
+               i40iw_cleanup_retrans_entry(cm_node);
+               cm_node->state = I40IW_CM_STATE_CLOSING;
+               i40iw_send_ack(cm_node);
+               /*
+                * Wait for ACK as this is simultaneous close.
+                * After we receive ACK, do not send anything.
+                * Just rm the node.
+                */
+               break;
+       case I40IW_CM_STATE_FIN_WAIT2:
+               cm_node->tcp_cntxt.rcv_nxt++;
+               i40iw_cleanup_retrans_entry(cm_node);
+               cm_node->state = I40IW_CM_STATE_TIME_WAIT;
+               i40iw_send_ack(cm_node);
+               ret =
+                   i40iw_schedule_cm_timer(cm_node, NULL, I40IW_TIMER_TYPE_CLOSE, 1, 0);
+               if (ret)
+                       i40iw_pr_err("node %p state = %d\n", cm_node, cm_node->state);
+               break;
+       case I40IW_CM_STATE_TIME_WAIT:
+               cm_node->tcp_cntxt.rcv_nxt++;
+               i40iw_cleanup_retrans_entry(cm_node);
+               cm_node->state = I40IW_CM_STATE_CLOSED;
+               i40iw_rem_ref_cm_node(cm_node);
+               break;
+       case I40IW_CM_STATE_OFFLOADED:
+       default:
+               i40iw_pr_err("bad state node %p state = %d\n", cm_node, cm_node->state);
+               break;
+       }
+}
+
+/**
+ * i40iw_handle_rst_pkt - process received RST packet
+ * @cm_node: connection's node
+ * @rbuf: receive buffer
+ */
+static void i40iw_handle_rst_pkt(struct i40iw_cm_node *cm_node,
+                                struct i40iw_puda_buf *rbuf)
+{
+       i40iw_cleanup_retrans_entry(cm_node);
+       switch (cm_node->state) {
+       case I40IW_CM_STATE_SYN_SENT:
+       case I40IW_CM_STATE_MPAREQ_SENT:
+               switch (cm_node->mpa_frame_rev) {
+               case IETF_MPA_V2:
+                       cm_node->mpa_frame_rev = IETF_MPA_V1;
+                       /* send a syn and goto syn sent state */
+                       cm_node->state = I40IW_CM_STATE_SYN_SENT;
+                       if (i40iw_send_syn(cm_node, 0))
+                               i40iw_active_open_err(cm_node, false);
+                       break;
+               case IETF_MPA_V1:
+               default:
+                       i40iw_active_open_err(cm_node, false);
+                       break;
+               }
+               break;
+       case I40IW_CM_STATE_MPAREQ_RCVD:
+               atomic_add_return(1, &cm_node->passive_state);
+               break;
+       case I40IW_CM_STATE_ESTABLISHED:
+       case I40IW_CM_STATE_SYN_RCVD:
+       case I40IW_CM_STATE_LISTENING:
+               i40iw_pr_err("Bad state state = %d\n", cm_node->state);
+               i40iw_passive_open_err(cm_node, false);
+               break;
+       case I40IW_CM_STATE_OFFLOADED:
+               i40iw_active_open_err(cm_node, false);
+               break;
+       case I40IW_CM_STATE_CLOSED:
+               break;
+       case I40IW_CM_STATE_FIN_WAIT2:
+       case I40IW_CM_STATE_FIN_WAIT1:
+       case I40IW_CM_STATE_LAST_ACK:
+               cm_node->cm_id->rem_ref(cm_node->cm_id);
+       case I40IW_CM_STATE_TIME_WAIT:
+               cm_node->state = I40IW_CM_STATE_CLOSED;
+               i40iw_rem_ref_cm_node(cm_node);
+               break;
+       default:
+               break;
+       }
+}
+
+/**
+ * i40iw_handle_rcv_mpa - Process a received MPA buffer
+ * @cm_node: connection's node
+ * @rbuf: receive buffer
+ */
+static void i40iw_handle_rcv_mpa(struct i40iw_cm_node *cm_node,
+                                struct i40iw_puda_buf *rbuf)
+{
+       int ret;
+       int datasize = rbuf->datalen;
+       u8 *dataloc = rbuf->data;
+
+       enum i40iw_cm_event_type type = I40IW_CM_EVENT_UNKNOWN;
+       u32 res_type;
+
+       ret = i40iw_parse_mpa(cm_node, dataloc, &res_type, datasize);
+       if (ret) {
+               if (cm_node->state == I40IW_CM_STATE_MPAREQ_SENT)
+                       i40iw_active_open_err(cm_node, true);
+               else
+                       i40iw_passive_open_err(cm_node, true);
+               return;
+       }
+
+       switch (cm_node->state) {
+       case I40IW_CM_STATE_ESTABLISHED:
+               if (res_type == I40IW_MPA_REQUEST_REJECT)
+                       i40iw_pr_err("state for reject\n");
+               cm_node->state = I40IW_CM_STATE_MPAREQ_RCVD;
+               type = I40IW_CM_EVENT_MPA_REQ;
+               i40iw_send_ack(cm_node);        /* ACK received MPA request */
+               atomic_set(&cm_node->passive_state,
+                          I40IW_PASSIVE_STATE_INDICATED);
+               break;
+       case I40IW_CM_STATE_MPAREQ_SENT:
+               i40iw_cleanup_retrans_entry(cm_node);
+               if (res_type == I40IW_MPA_REQUEST_REJECT) {
+                       type = I40IW_CM_EVENT_MPA_REJECT;
+                       cm_node->state = I40IW_CM_STATE_MPAREJ_RCVD;
+               } else {
+                       type = I40IW_CM_EVENT_CONNECTED;
+                       cm_node->state = I40IW_CM_STATE_OFFLOADED;
+                       i40iw_send_ack(cm_node);
+               }
+               break;
+       default:
+               pr_err("%s wrong cm_node state =%d\n", __func__, cm_node->state);
+               break;
+       }
+       i40iw_create_event(cm_node, type);
+}
+
+/**
+ * i40iw_indicate_pkt_err - Send up err event to cm
+ * @cm_node: connection's node
+ */
+static void i40iw_indicate_pkt_err(struct i40iw_cm_node *cm_node)
+{
+       switch (cm_node->state) {
+       case I40IW_CM_STATE_SYN_SENT:
+       case I40IW_CM_STATE_MPAREQ_SENT:
+               i40iw_active_open_err(cm_node, true);
+               break;
+       case I40IW_CM_STATE_ESTABLISHED:
+       case I40IW_CM_STATE_SYN_RCVD:
+               i40iw_passive_open_err(cm_node, true);
+               break;
+       case I40IW_CM_STATE_OFFLOADED:
+       default:
+               break;
+       }
+}
+
+/**
+ * i40iw_check_syn - Check for error on received syn ack
+ * @cm_node: connection's node
+ * @tcph: pointer tcp header
+ */
+static int i40iw_check_syn(struct i40iw_cm_node *cm_node, struct tcphdr *tcph)
+{
+       int err = 0;
+
+       if (ntohl(tcph->ack_seq) != cm_node->tcp_cntxt.loc_seq_num) {
+               err = 1;
+               i40iw_active_open_err(cm_node, true);
+       }
+       return err;
+}
+
+/**
+ * i40iw_check_seq - check whether the sequence numbers are valid
+ * @cm_node: connection's node
+ * @tcph: pointer tcp header
+ */
+static int i40iw_check_seq(struct i40iw_cm_node *cm_node, struct tcphdr *tcph)
+{
+       int err = 0;
+       u32 seq;
+       u32 ack_seq;
+       u32 loc_seq_num = cm_node->tcp_cntxt.loc_seq_num;
+       u32 rcv_nxt = cm_node->tcp_cntxt.rcv_nxt;
+       u32 rcv_wnd;
+
+       seq = ntohl(tcph->seq);
+       ack_seq = ntohl(tcph->ack_seq);
+       rcv_wnd = cm_node->tcp_cntxt.rcv_wnd;
+       if (ack_seq != loc_seq_num)
+               err = -1;
+       else if (!between(seq, rcv_nxt, (rcv_nxt + rcv_wnd)))
+               err = -1;
+       if (err) {
+               i40iw_pr_err("seq number\n");
+               i40iw_indicate_pkt_err(cm_node);
+       }
+       return err;
+}
+
+/**
+ * i40iw_handle_syn_pkt - process a SYN packet (passive node)
+ * @cm_node: connection's node
+ * @rbuf: receive buffer
+ */
+static void i40iw_handle_syn_pkt(struct i40iw_cm_node *cm_node,
+                                struct i40iw_puda_buf *rbuf)
+{
+       struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
+       int ret;
+       u32 inc_sequence;
+       int optionsize;
+       struct i40iw_cm_info nfo;
+
+       optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
+       inc_sequence = ntohl(tcph->seq);
+
+       switch (cm_node->state) {
+       case I40IW_CM_STATE_SYN_SENT:
+       case I40IW_CM_STATE_MPAREQ_SENT:
+               /* Rcvd syn on active open connection */
+               i40iw_active_open_err(cm_node, 1);
+               break;
+       case I40IW_CM_STATE_LISTENING:
+               /* Passive OPEN */
+               if (atomic_read(&cm_node->listener->pend_accepts_cnt) >
+                   cm_node->listener->backlog) {
+                       cm_node->cm_core->stats_backlog_drops++;
+                       i40iw_passive_open_err(cm_node, false);
+                       break;
+               }
+               ret = i40iw_handle_tcp_options(cm_node, tcph, optionsize, 1);
+               if (ret) {
+                       i40iw_passive_open_err(cm_node, false);
+                       /* drop pkt */
+                       break;
+               }
+               cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
+               cm_node->accept_pend = 1;
+               atomic_inc(&cm_node->listener->pend_accepts_cnt);
+
+               cm_node->state = I40IW_CM_STATE_SYN_RCVD;
+               i40iw_get_addr_info(cm_node, &nfo);
+               ret = i40iw_manage_qhash(cm_node->iwdev,
+                                        &nfo,
+                                        I40IW_QHASH_TYPE_TCP_ESTABLISHED,
+                                        I40IW_QHASH_MANAGE_TYPE_ADD,
+                                        (void *)cm_node,
+                                        false);
+               cm_node->qhash_set = true;
+               break;
+       case I40IW_CM_STATE_CLOSED:
+               i40iw_cleanup_retrans_entry(cm_node);
+               atomic_inc(&cm_node->ref_count);
+               i40iw_send_reset(cm_node);
+               break;
+       case I40IW_CM_STATE_OFFLOADED:
+       case I40IW_CM_STATE_ESTABLISHED:
+       case I40IW_CM_STATE_FIN_WAIT1:
+       case I40IW_CM_STATE_FIN_WAIT2:
+       case I40IW_CM_STATE_MPAREQ_RCVD:
+       case I40IW_CM_STATE_LAST_ACK:
+       case I40IW_CM_STATE_CLOSING:
+       case I40IW_CM_STATE_UNKNOWN:
+       default:
+               break;
+       }
+}
+
+/**
+ * i40iw_handle_synack_pkt - Process SYN+ACK packet (active side)
+ * @cm_node: connection's node
+ * @rbuf: receive buffer
+ */
+static void i40iw_handle_synack_pkt(struct i40iw_cm_node *cm_node,
+                                   struct i40iw_puda_buf *rbuf)
+{
+       struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
+       int ret;
+       u32 inc_sequence;
+       int optionsize;
+
+       optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
+       inc_sequence = ntohl(tcph->seq);
+       switch (cm_node->state) {
+       case I40IW_CM_STATE_SYN_SENT:
+               i40iw_cleanup_retrans_entry(cm_node);
+               /* active open */
+               if (i40iw_check_syn(cm_node, tcph)) {
+                       i40iw_pr_err("check syn fail\n");
+                       return;
+               }
+               cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
+               /* setup options */
+               ret = i40iw_handle_tcp_options(cm_node, tcph, optionsize, 0);
+               if (ret) {
+                       i40iw_debug(cm_node->dev,
+                                   I40IW_DEBUG_CM,
+                                   "cm_node=%p tcp_options failed\n",
+                                   cm_node);
+                       break;
+               }
+               i40iw_cleanup_retrans_entry(cm_node);
+               cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
+               i40iw_send_ack(cm_node);        /* ACK  for the syn_ack */
+               ret = i40iw_send_mpa_request(cm_node);
+               if (ret) {
+                       i40iw_debug(cm_node->dev,
+                                   I40IW_DEBUG_CM,
+                                   "cm_node=%p i40iw_send_mpa_request failed\n",
+                                   cm_node);
+                       break;
+               }
+               cm_node->state = I40IW_CM_STATE_MPAREQ_SENT;
+               break;
+       case I40IW_CM_STATE_MPAREQ_RCVD:
+               i40iw_passive_open_err(cm_node, true);
+               break;
+       case I40IW_CM_STATE_LISTENING:
+               cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
+               i40iw_cleanup_retrans_entry(cm_node);
+               cm_node->state = I40IW_CM_STATE_CLOSED;
+               i40iw_send_reset(cm_node);
+               break;
+       case I40IW_CM_STATE_CLOSED:
+               cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
+               i40iw_cleanup_retrans_entry(cm_node);
+               atomic_inc(&cm_node->ref_count);
+               i40iw_send_reset(cm_node);
+               break;
+       case I40IW_CM_STATE_ESTABLISHED:
+       case I40IW_CM_STATE_FIN_WAIT1:
+       case I40IW_CM_STATE_FIN_WAIT2:
+       case I40IW_CM_STATE_LAST_ACK:
+       case I40IW_CM_STATE_OFFLOADED:
+       case I40IW_CM_STATE_CLOSING:
+       case I40IW_CM_STATE_UNKNOWN:
+       case I40IW_CM_STATE_MPAREQ_SENT:
+       default:
+               break;
+       }
+}
+
+/**
+ * i40iw_handle_ack_pkt - process packet with ACK
+ * @cm_node: connection's node
+ * @rbuf: receive buffer
+ */
+static int i40iw_handle_ack_pkt(struct i40iw_cm_node *cm_node,
+                               struct i40iw_puda_buf *rbuf)
+{
+       struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
+       u32 inc_sequence;
+       int ret = 0;
+       int optionsize;
+       u32 datasize = rbuf->datalen;
+
+       optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
+
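+       /* drop the packet if it is outside the expected sequence window */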
+       if (i40iw_check_seq(cm_node, tcph))
+               return -EINVAL;
+
+       inc_sequence = ntohl(tcph->seq);
+       switch (cm_node->state) {
+       case I40IW_CM_STATE_SYN_RCVD:
+               i40iw_cleanup_retrans_entry(cm_node);
+               ret = i40iw_handle_tcp_options(cm_node, tcph, optionsize, 1);
+               if (ret)
+                       break;
+               cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
+               cm_node->state = I40IW_CM_STATE_ESTABLISHED;
+               if (datasize) {
+                       cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
+                       i40iw_handle_rcv_mpa(cm_node, rbuf);
+               }
+               break;
+       case I40IW_CM_STATE_ESTABLISHED:
+               i40iw_cleanup_retrans_entry(cm_node);
+               if (datasize) {
+                       cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
+                       i40iw_handle_rcv_mpa(cm_node, rbuf);
+               }
+               break;
+       case I40IW_CM_STATE_MPAREQ_SENT:
+               cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
+               if (datasize) {
+                       cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
+                       i40iw_handle_rcv_mpa(cm_node, rbuf);
+               }
+               break;
+       case I40IW_CM_STATE_LISTENING:
+               i40iw_cleanup_retrans_entry(cm_node);
+               cm_node->state = I40IW_CM_STATE_CLOSED;
+               i40iw_send_reset(cm_node);
+               break;
+       case I40IW_CM_STATE_CLOSED:
+               i40iw_cleanup_retrans_entry(cm_node);
+               atomic_inc(&cm_node->ref_count);
+               i40iw_send_reset(cm_node);
+               break;
+       case I40IW_CM_STATE_LAST_ACK:
+       case I40IW_CM_STATE_CLOSING:
+               i40iw_cleanup_retrans_entry(cm_node);
+               cm_node->state = I40IW_CM_STATE_CLOSED;
+               if (!cm_node->accept_pend)
+                       cm_node->cm_id->rem_ref(cm_node->cm_id);
+               i40iw_rem_ref_cm_node(cm_node);
+               break;
+       case I40IW_CM_STATE_FIN_WAIT1:
+               i40iw_cleanup_retrans_entry(cm_node);
+               cm_node->state = I40IW_CM_STATE_FIN_WAIT2;
+               break;
+       case I40IW_CM_STATE_SYN_SENT:
+       case I40IW_CM_STATE_FIN_WAIT2:
+       case I40IW_CM_STATE_OFFLOADED:
+       case I40IW_CM_STATE_MPAREQ_RCVD:
+       case I40IW_CM_STATE_UNKNOWN:
+       default:
+               i40iw_cleanup_retrans_entry(cm_node);
+               break;
+       }
+       return ret;
+}
+
+/**
+ * i40iw_process_packet - process cm packet
+ * @cm_node: connection's node
+ * @rbuf: receive buffer
+ */
+static void i40iw_process_packet(struct i40iw_cm_node *cm_node,
+                                struct i40iw_puda_buf *rbuf)
+{
+       enum i40iw_tcpip_pkt_type pkt_type = I40IW_PKT_TYPE_UNKNOWN;
+       struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
+       u32 fin_set = 0;
+       int ret;
+
+       if (tcph->rst) {
+               pkt_type = I40IW_PKT_TYPE_RST;
+       } else if (tcph->syn) {
+               pkt_type = I40IW_PKT_TYPE_SYN;
+               if (tcph->ack)
+                       pkt_type = I40IW_PKT_TYPE_SYNACK;
+       } else if (tcph->ack) {
+               pkt_type = I40IW_PKT_TYPE_ACK;
+       }
+       if (tcph->fin)
+               fin_set = 1;
+
+       switch (pkt_type) {
+       case I40IW_PKT_TYPE_SYN:
+               i40iw_handle_syn_pkt(cm_node, rbuf);
+               break;
+       case I40IW_PKT_TYPE_SYNACK:
+               i40iw_handle_synack_pkt(cm_node, rbuf);
+               break;
+       case I40IW_PKT_TYPE_ACK:
+               ret = i40iw_handle_ack_pkt(cm_node, rbuf);
+               if (fin_set && !ret)
+                       i40iw_handle_fin_pkt(cm_node);
+               break;
+       case I40IW_PKT_TYPE_RST:
+               i40iw_handle_rst_pkt(cm_node, rbuf);
+               break;
+       default:
+               if (fin_set &&
+                   (!i40iw_check_seq(cm_node, (struct tcphdr *)rbuf->tcph)))
+                       i40iw_handle_fin_pkt(cm_node);
+               break;
+       }
+}
+
+/**
+ * i40iw_make_listen_node - create a listen node with params
+ * @cm_core: cm's core
+ * @iwdev: iwarp device structure
+ * @cm_info: quad info for connection
+ */
+static struct i40iw_cm_listener *i40iw_make_listen_node(
+                                       struct i40iw_cm_core *cm_core,
+                                       struct i40iw_device *iwdev,
+                                       struct i40iw_cm_info *cm_info)
+{
+       struct i40iw_cm_listener *listener;
+       unsigned long flags;
+
+       /* cannot have multiple matching listeners */
+       listener = i40iw_find_listener(cm_core, cm_info->loc_addr,
+                                      cm_info->loc_port,
+                                      cm_info->vlan_id,
+                                      I40IW_CM_LISTENER_EITHER_STATE);
+       if (listener &&
+           (listener->listener_state == I40IW_CM_LISTENER_ACTIVE_STATE)) {
+               atomic_dec(&listener->ref_count);
+               i40iw_debug(cm_core->dev,
+                           I40IW_DEBUG_CM,
+                           "Not creating listener since it already exists\n");
+               return NULL;
+       }
+
+       if (!listener) {
+               /* create a CM listen node (1/2 node to compare incoming traffic to) */
+               listener = kzalloc(sizeof(*listener), GFP_ATOMIC);
+               if (!listener)
+                       return NULL;
+               cm_core->stats_listen_nodes_created++;
+               memcpy(listener->loc_addr, cm_info->loc_addr, sizeof(listener->loc_addr));
+               listener->loc_port = cm_info->loc_port;
+
+               INIT_LIST_HEAD(&listener->child_listen_list);
+
+               atomic_set(&listener->ref_count, 1);
+       } else {
+               listener->reused_node = 1;
+       }
+
+       listener->cm_id = cm_info->cm_id;
+       listener->ipv4 = cm_info->ipv4;
+       listener->vlan_id = cm_info->vlan_id;
+       atomic_set(&listener->pend_accepts_cnt, 0);
+       listener->cm_core = cm_core;
+       listener->iwdev = iwdev;
+
+       listener->backlog = cm_info->backlog;
+       listener->listener_state = I40IW_CM_LISTENER_ACTIVE_STATE;
+
+       if (!listener->reused_node) {
+               spin_lock_irqsave(&cm_core->listen_list_lock, flags);
+               list_add(&listener->list, &cm_core->listen_nodes);
+               spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+       }
+
+       return listener;
+}
+
+/**
+ * i40iw_create_cm_node - make a connection node with params
+ * @cm_core: cm's core
+ * @iwdev: iwarp device structure
+ * @private_data_len: length of private data for mpa request
+ * @private_data: pointer to private data for connection
+ * @cm_info: quad info for connection
+ */
+static struct i40iw_cm_node *i40iw_create_cm_node(
+                                       struct i40iw_cm_core *cm_core,
+                                       struct i40iw_device *iwdev,
+                                       u16 private_data_len,
+                                       void *private_data,
+                                       struct i40iw_cm_info *cm_info)
+{
+       int ret;
+       struct i40iw_cm_node *cm_node;
+       struct i40iw_cm_listener *loopback_remotelistener;
+       struct i40iw_cm_node *loopback_remotenode;
+       struct i40iw_cm_info loopback_cm_info;
+
+       /* create a CM connection node */
+       cm_node = i40iw_make_cm_node(cm_core, iwdev, cm_info, NULL);
+       if (!cm_node)
+               return NULL;
+       /* set our node side to client (active) side */
+       cm_node->tcp_cntxt.client = 1;
+       cm_node->tcp_cntxt.rcv_wscale = I40IW_CM_DEFAULT_RCV_WND_SCALE;
+
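+       /* matching local and remote addresses mean a loopback connection */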
+       if (!memcmp(cm_info->loc_addr, cm_info->rem_addr, sizeof(cm_info->loc_addr))) {
+               loopback_remotelistener = i40iw_find_listener(
+                                               cm_core,
+                                               cm_info->rem_addr,
+                                               cm_node->rem_port,
+                                               cm_node->vlan_id,
+                                               I40IW_CM_LISTENER_ACTIVE_STATE);
+               if (!loopback_remotelistener) {
+                       i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED);
+               } else {
+                       loopback_cm_info = *cm_info;
+                       loopback_cm_info.loc_port = cm_info->rem_port;
+                       loopback_cm_info.rem_port = cm_info->loc_port;
+                       loopback_cm_info.cm_id = loopback_remotelistener->cm_id;
+                       loopback_cm_info.ipv4 = cm_info->ipv4;
+                       loopback_remotenode = i40iw_make_cm_node(cm_core,
+                                                                iwdev,
+                                                                &loopback_cm_info,
+                                                                loopback_remotelistener);
+                       if (!loopback_remotenode) {
+                               i40iw_rem_ref_cm_node(cm_node);
+                               return NULL;
+                       }
+                       cm_core->stats_loopbacks++;
+                       loopback_remotenode->loopbackpartner = cm_node;
+                       loopback_remotenode->tcp_cntxt.rcv_wscale =
+                               I40IW_CM_DEFAULT_RCV_WND_SCALE;
+                       cm_node->loopbackpartner = loopback_remotenode;
+                       memcpy(loopback_remotenode->pdata_buf, private_data,
+                              private_data_len);
+                       loopback_remotenode->pdata.size = private_data_len;
+
+                       cm_node->state = I40IW_CM_STATE_OFFLOADED;
+                       cm_node->tcp_cntxt.rcv_nxt =
+                               loopback_remotenode->tcp_cntxt.loc_seq_num;
+                       loopback_remotenode->tcp_cntxt.rcv_nxt =
+                               cm_node->tcp_cntxt.loc_seq_num;
+                       cm_node->tcp_cntxt.max_snd_wnd =
+                               loopback_remotenode->tcp_cntxt.rcv_wnd;
+                       loopback_remotenode->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.rcv_wnd;
+                       cm_node->tcp_cntxt.snd_wnd = loopback_remotenode->tcp_cntxt.rcv_wnd;
+                       loopback_remotenode->tcp_cntxt.snd_wnd = cm_node->tcp_cntxt.rcv_wnd;
+                       cm_node->tcp_cntxt.snd_wscale = loopback_remotenode->tcp_cntxt.rcv_wscale;
+                       loopback_remotenode->tcp_cntxt.snd_wscale = cm_node->tcp_cntxt.rcv_wscale;
+                       loopback_remotenode->state = I40IW_CM_STATE_MPAREQ_RCVD;
+                       i40iw_create_event(loopback_remotenode, I40IW_CM_EVENT_MPA_REQ);
+               }
+               return cm_node;
+       }
+
+       cm_node->pdata.size = private_data_len;
+       cm_node->pdata.addr = cm_node->pdata_buf;
+
+       memcpy(cm_node->pdata_buf, private_data, private_data_len);
+
+       cm_node->state = I40IW_CM_STATE_SYN_SENT;
+       ret = i40iw_send_syn(cm_node, 0);
+
+       if (ret) {
+               if (cm_node->ipv4)
+                       i40iw_debug(cm_node->dev,
+                                   I40IW_DEBUG_CM,
+                                   "Api - connect() FAILED: dest addr=%pI4",
+                                   cm_node->rem_addr);
+               else
+                       i40iw_debug(cm_node->dev, I40IW_DEBUG_CM,
+                                   "Api - connect() FAILED: dest addr=%pI6",
+                                   cm_node->rem_addr);
+               i40iw_rem_ref_cm_node(cm_node);
+               cm_node = NULL;
+       }
+
+       if (cm_node)
+               i40iw_debug(cm_node->dev,
+                           I40IW_DEBUG_CM,
+                           "Api - connect(): port=0x%04x, cm_node=%p, cm_id = %p.\n",
+                           cm_node->rem_port,
+                           cm_node,
+                           cm_node->cm_id);
+
+       return cm_node;
+}
+
+/**
+ * i40iw_cm_reject - reject and teardown a connection
+ * @cm_node: connection's node
+ * @pdata: ptr to private data for reject
+ * @plen: size of private data
+ */
+static int i40iw_cm_reject(struct i40iw_cm_node *cm_node, const void *pdata, u8 plen)
+{
+       int ret = 0;
+       int err;
+       int passive_state;
+       struct iw_cm_id *cm_id = cm_node->cm_id;
+       struct i40iw_cm_node *loopback = cm_node->loopbackpartner;
+
+       if (cm_node->tcp_cntxt.client)
+               return ret;
+       i40iw_cleanup_retrans_entry(cm_node);
+
+       if (!loopback) {
+               passive_state = atomic_add_return(1, &cm_node->passive_state);
+               if (passive_state == I40IW_SEND_RESET_EVENT) {
+                       cm_node->state = I40IW_CM_STATE_CLOSED;
+                       i40iw_rem_ref_cm_node(cm_node);
+               } else {
+                       if (cm_node->state == I40IW_CM_STATE_LISTENER_DESTROYED) {
+                               i40iw_rem_ref_cm_node(cm_node);
+                       } else {
+                               ret = i40iw_send_mpa_reject(cm_node, pdata, plen);
+                               if (ret) {
+                                       cm_node->state = I40IW_CM_STATE_CLOSED;
+                                       err = i40iw_send_reset(cm_node);
+                                       if (err)
+                                               i40iw_pr_err("send reset failed\n");
+                               } else {
+                                       cm_id->add_ref(cm_id);
+                               }
+                       }
+               }
+       } else {
+               cm_node->cm_id = NULL;
+               if (cm_node->state == I40IW_CM_STATE_LISTENER_DESTROYED) {
+                       i40iw_rem_ref_cm_node(cm_node);
+                       i40iw_rem_ref_cm_node(loopback);
+               } else {
+                       ret = i40iw_send_cm_event(loopback,
+                                                 loopback->cm_id,
+                                                 IW_CM_EVENT_CONNECT_REPLY,
+                                                 -ECONNREFUSED);
+                       i40iw_rem_ref_cm_node(cm_node);
+                       loopback->state = I40IW_CM_STATE_CLOSING;
+
+                       cm_id = loopback->cm_id;
+                       i40iw_rem_ref_cm_node(loopback);
+                       cm_id->rem_ref(cm_id);
+               }
+       }
+
+       return ret;
+}
+
+/**
+ * i40iw_cm_close - close a cm connection
+ * @cm_node: connection's node
+ */
+static int i40iw_cm_close(struct i40iw_cm_node *cm_node)
+{
+       int ret = 0;
+
+       if (!cm_node)
+               return -EINVAL;
+
+       switch (cm_node->state) {
+       case I40IW_CM_STATE_SYN_RCVD:
+       case I40IW_CM_STATE_SYN_SENT:
+       case I40IW_CM_STATE_ONE_SIDE_ESTABLISHED:
+       case I40IW_CM_STATE_ESTABLISHED:
+       case I40IW_CM_STATE_ACCEPTING:
+       case I40IW_CM_STATE_MPAREQ_SENT:
+       case I40IW_CM_STATE_MPAREQ_RCVD:
+               i40iw_cleanup_retrans_entry(cm_node);
+               i40iw_send_reset(cm_node);
+               break;
+       case I40IW_CM_STATE_CLOSE_WAIT:
+               cm_node->state = I40IW_CM_STATE_LAST_ACK;
+               i40iw_send_fin(cm_node);
+               break;
+       case I40IW_CM_STATE_FIN_WAIT1:
+       case I40IW_CM_STATE_FIN_WAIT2:
+       case I40IW_CM_STATE_LAST_ACK:
+       case I40IW_CM_STATE_TIME_WAIT:
+       case I40IW_CM_STATE_CLOSING:
+               ret = -1;
+               break;
+       case I40IW_CM_STATE_LISTENING:
+               i40iw_cleanup_retrans_entry(cm_node);
+               i40iw_send_reset(cm_node);
+               break;
+       case I40IW_CM_STATE_MPAREJ_RCVD:
+       case I40IW_CM_STATE_UNKNOWN:
+       case I40IW_CM_STATE_INITED:
+       case I40IW_CM_STATE_CLOSED:
+       case I40IW_CM_STATE_LISTENER_DESTROYED:
+               i40iw_rem_ref_cm_node(cm_node);
+               break;
+       case I40IW_CM_STATE_OFFLOADED:
+               if (cm_node->send_entry)
+                       i40iw_pr_err("send_entry\n");
+               i40iw_rem_ref_cm_node(cm_node);
+               break;
+       }
+       return ret;
+}
+
+/**
+ * i40iw_receive_ilq - receive an Ethernet packet and process it
+ * through CM
+ * @dev: FPK dev struct
+ * @rbuf: receive buffer
+ */
+void i40iw_receive_ilq(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *rbuf)
+{
+       struct i40iw_cm_node *cm_node;
+       struct i40iw_cm_listener *listener;
+       struct iphdr *iph;
+       struct ipv6hdr *ip6h;
+       struct tcphdr *tcph;
+       struct i40iw_cm_info cm_info;
+       struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+       struct i40iw_cm_core *cm_core = &iwdev->cm_core;
+       struct vlan_ethhdr *ethh;
+
+       /* if vlan, then maclen = 18 else 14 */
+       iph = (struct iphdr *)rbuf->iph;
+       memset(&cm_info, 0, sizeof(cm_info));
+
+       i40iw_debug_buf(dev,
+                       I40IW_DEBUG_ILQ,
+                       "RECEIVE ILQ BUFFER",
+                       rbuf->mem.va,
+                       rbuf->totallen);
+       ethh = (struct vlan_ethhdr *)rbuf->mem.va;
+
+       if (ethh->h_vlan_proto == htons(ETH_P_8021Q)) {
+               cm_info.vlan_id = ntohs(ethh->h_vlan_TCI) & VLAN_VID_MASK;
+               i40iw_debug(cm_core->dev,
+                           I40IW_DEBUG_CM,
+                           "%s vlan_id=%d\n",
+                           __func__,
+                           cm_info.vlan_id);
+       } else {
+               cm_info.vlan_id = I40IW_NO_VLAN;
+       }
+       tcph = (struct tcphdr *)rbuf->tcph;
+
+       if (rbuf->ipv4) {
+               cm_info.loc_addr[0] = ntohl(iph->daddr);
+               cm_info.rem_addr[0] = ntohl(iph->saddr);
+               cm_info.ipv4 = true;
+       } else {
+               ip6h = (struct ipv6hdr *)rbuf->iph;
+               i40iw_copy_ip_ntohl(cm_info.loc_addr,
+                                   ip6h->daddr.in6_u.u6_addr32);
+               i40iw_copy_ip_ntohl(cm_info.rem_addr,
+                                   ip6h->saddr.in6_u.u6_addr32);
+               cm_info.ipv4 = false;
+       }
+       cm_info.loc_port = ntohs(tcph->dest);
+       cm_info.rem_port = ntohs(tcph->source);
+       cm_node = i40iw_find_node(cm_core,
+                                 cm_info.rem_port,
+                                 cm_info.rem_addr,
+                                 cm_info.loc_port,
+                                 cm_info.loc_addr,
+                                 true);
+
+       if (!cm_node) {
+               /* the only packets accepted here are for the PASSIVE open (SYN only) */
+               if (!tcph->syn || tcph->ack)
+                       return;
+               listener =
+                   i40iw_find_listener(cm_core,
+                                       cm_info.loc_addr,
+                                       cm_info.loc_port,
+                                       cm_info.vlan_id,
+                                       I40IW_CM_LISTENER_ACTIVE_STATE);
+               if (!listener) {
+                       cm_info.cm_id = NULL;
+                       i40iw_debug(cm_core->dev,
+                                   I40IW_DEBUG_CM,
+                                   "%s no listener found\n",
+                                   __func__);
+                       return;
+               }
+               cm_info.cm_id = listener->cm_id;
+               cm_node = i40iw_make_cm_node(cm_core, iwdev, &cm_info, listener);
+               if (!cm_node) {
+                       i40iw_debug(cm_core->dev,
+                                   I40IW_DEBUG_CM,
+                                   "%s allocate node failed\n",
+                                   __func__);
+                       atomic_dec(&listener->ref_count);
+                       return;
+               }
+               if (!tcph->rst && !tcph->fin) {
+                       cm_node->state = I40IW_CM_STATE_LISTENING;
+               } else {
+                       i40iw_rem_ref_cm_node(cm_node);
+                       return;
+               }
+               atomic_inc(&cm_node->ref_count);
+       } else if (cm_node->state == I40IW_CM_STATE_OFFLOADED) {
+               i40iw_rem_ref_cm_node(cm_node);
+               return;
+       }
+       i40iw_process_packet(cm_node, rbuf);
+       i40iw_rem_ref_cm_node(cm_node);
+}
+
+/**
+ * i40iw_setup_cm_core - allocate a top level instance of a cm
+ * core
+ * @iwdev: iwarp device structure
+ */
+void i40iw_setup_cm_core(struct i40iw_device *iwdev)
+{
+       struct i40iw_cm_core *cm_core = &iwdev->cm_core;
+
+       cm_core->iwdev = iwdev;
+       cm_core->dev = &iwdev->sc_dev;
+
+       INIT_LIST_HEAD(&cm_core->connected_nodes);
+       INIT_LIST_HEAD(&cm_core->listen_nodes);
+
+       init_timer(&cm_core->tcp_timer);
+       cm_core->tcp_timer.function = i40iw_cm_timer_tick;
+       cm_core->tcp_timer.data = (unsigned long)cm_core;
+
+       spin_lock_init(&cm_core->ht_lock);
+       spin_lock_init(&cm_core->listen_list_lock);
+
+       cm_core->event_wq = create_singlethread_workqueue("iwewq");
+       cm_core->disconn_wq = create_singlethread_workqueue("iwdwq");
+}
+
+/**
+ * i40iw_cleanup_cm_core - deallocate a top level instance of a
+ * cm core
+ * @cm_core: cm's core
+ */
+void i40iw_cleanup_cm_core(struct i40iw_cm_core *cm_core)
+{
+       unsigned long flags;
+
+       if (!cm_core)
+               return;
+
+       spin_lock_irqsave(&cm_core->ht_lock, flags);
+       if (timer_pending(&cm_core->tcp_timer))
+               del_timer_sync(&cm_core->tcp_timer);
+       spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+
+       destroy_workqueue(cm_core->event_wq);
+       destroy_workqueue(cm_core->disconn_wq);
+}
+
+/**
+ * i40iw_init_tcp_ctx - setup qp context
+ * @cm_node: connection's node
+ * @tcp_info: offload info for tcp
+ * @iwqp: associate qp for the connection
+ */
+static void i40iw_init_tcp_ctx(struct i40iw_cm_node *cm_node,
+                              struct i40iw_tcp_offload_info *tcp_info,
+                              struct i40iw_qp *iwqp)
+{
+       tcp_info->ipv4 = cm_node->ipv4;
+       tcp_info->drop_ooo_seg = true;
+       tcp_info->wscale = true;
+       tcp_info->ignore_tcp_opt = true;
+       tcp_info->ignore_tcp_uns_opt = true;
+       tcp_info->no_nagle = false;
+
+       tcp_info->ttl = I40IW_DEFAULT_TTL;
+       tcp_info->rtt_var = cpu_to_le32(I40IW_DEFAULT_RTT_VAR);
+       tcp_info->ss_thresh = cpu_to_le32(I40IW_DEFAULT_SS_THRESH);
+       tcp_info->rexmit_thresh = I40IW_DEFAULT_REXMIT_THRESH;
+
+       tcp_info->tcp_state = I40IW_TCP_STATE_ESTABLISHED;
+       tcp_info->snd_wscale = cm_node->tcp_cntxt.snd_wscale;
+       tcp_info->rcv_wscale = cm_node->tcp_cntxt.rcv_wscale;
+
+       tcp_info->snd_nxt = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
+       tcp_info->snd_wnd = cpu_to_le32(cm_node->tcp_cntxt.snd_wnd);
+       tcp_info->rcv_nxt = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt);
+       tcp_info->snd_max = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
+
+       tcp_info->snd_una = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
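+       /* start with a conservative two-segment congestion window */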
+       tcp_info->cwnd = cpu_to_le32(2 * cm_node->tcp_cntxt.mss);
+       tcp_info->snd_wl1 = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt);
+       tcp_info->snd_wl2 = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
+       tcp_info->max_snd_window = cpu_to_le32(cm_node->tcp_cntxt.max_snd_wnd);
+       tcp_info->rcv_wnd = cpu_to_le32(cm_node->tcp_cntxt.rcv_wnd <<
+                                       cm_node->tcp_cntxt.rcv_wscale);
+
+       tcp_info->flow_label = 0;
+       tcp_info->snd_mss = cpu_to_le32(((u32)cm_node->tcp_cntxt.mss));
+       if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
+               tcp_info->insert_vlan_tag = true;
+               tcp_info->vlan_tag = cpu_to_le16(cm_node->vlan_id);
+       }
+       if (cm_node->ipv4) {
+               tcp_info->src_port = cpu_to_le16(cm_node->loc_port);
+               tcp_info->dst_port = cpu_to_le16(cm_node->rem_port);
+
+               tcp_info->dest_ip_addr3 = cpu_to_le32(cm_node->rem_addr[0]);
+               tcp_info->local_ipaddr3 = cpu_to_le32(cm_node->loc_addr[0]);
+               tcp_info->arp_idx = cpu_to_le32(i40iw_arp_table(iwqp->iwdev,
+                                                               &tcp_info->dest_ip_addr3,
+                                                               true,
+                                                               NULL,
+                                                               I40IW_ARP_RESOLVE));
+       } else {
+               tcp_info->src_port = cpu_to_le16(cm_node->loc_port);
+               tcp_info->dst_port = cpu_to_le16(cm_node->rem_port);
+               tcp_info->dest_ip_addr0 = cpu_to_le32(cm_node->rem_addr[0]);
+               tcp_info->dest_ip_addr1 = cpu_to_le32(cm_node->rem_addr[1]);
+               tcp_info->dest_ip_addr2 = cpu_to_le32(cm_node->rem_addr[2]);
+               tcp_info->dest_ip_addr3 = cpu_to_le32(cm_node->rem_addr[3]);
+               tcp_info->local_ipaddr0 = cpu_to_le32(cm_node->loc_addr[0]);
+               tcp_info->local_ipaddr1 = cpu_to_le32(cm_node->loc_addr[1]);
+               tcp_info->local_ipaddr2 = cpu_to_le32(cm_node->loc_addr[2]);
+               tcp_info->local_ipaddr3 = cpu_to_le32(cm_node->loc_addr[3]);
+               tcp_info->arp_idx = cpu_to_le32(i40iw_arp_table(
+                                                       iwqp->iwdev,
+                                                       &tcp_info->dest_ip_addr0,
+                                                       false,
+                                                       NULL,
+                                                       I40IW_ARP_RESOLVE));
+       }
+}
+
+/**
+ * i40iw_cm_init_tsa_conn - setup qp for RTS
+ * @iwqp: associate qp for the connection
+ * @cm_node: connection's node
+ */
+static void i40iw_cm_init_tsa_conn(struct i40iw_qp *iwqp,
+                                  struct i40iw_cm_node *cm_node)
+{
+       struct i40iw_tcp_offload_info tcp_info;
+       struct i40iwarp_offload_info *iwarp_info;
+       struct i40iw_qp_host_ctx_info *ctx_info;
+       struct i40iw_device *iwdev = iwqp->iwdev;
+       struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev;
+
+       memset(&tcp_info, 0x00, sizeof(struct i40iw_tcp_offload_info));
+       iwarp_info = &iwqp->iwarp_info;
+       ctx_info = &iwqp->ctx_info;
+
+       ctx_info->tcp_info = &tcp_info;
+       ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
+       ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
+
+       iwarp_info->ord_size = cm_node->ord_size;
+       iwarp_info->ird_size = i40iw_derive_hw_ird_setting(cm_node->ird_size);
+
+       if (iwarp_info->ord_size == 1)
+               iwarp_info->ord_size = 2;
+
+       iwarp_info->rd_enable = true;
+       iwarp_info->rdmap_ver = 1;
+       iwarp_info->ddp_ver = 1;
+
+       iwarp_info->pd_id = iwqp->iwpd->sc_pd.pd_id;
+
+       ctx_info->tcp_info_valid = true;
+       ctx_info->iwarp_info_valid = true;
+
+       i40iw_init_tcp_ctx(cm_node, &tcp_info, iwqp);
+       if (cm_node->snd_mark_en) {
+               iwarp_info->snd_mark_en = true;
+               iwarp_info->snd_mark_offset = (tcp_info.snd_nxt &
+                               SNDMARKER_SEQNMASK) + cm_node->lsmm_size;
+       }
+
+       cm_node->state = I40IW_CM_STATE_OFFLOADED;
+       tcp_info.tcp_state = I40IW_TCP_STATE_ESTABLISHED;
+       tcp_info.src_mac_addr_idx = iwdev->mac_ip_table_idx;
+
+       dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp, (u64 *)(iwqp->host_ctx.va), ctx_info);
+
+       /* once tcp_info is set, no need to do it again */
+       ctx_info->tcp_info_valid = false;
+       ctx_info->iwarp_info_valid = false;
+}
+
+/**
+ * i40iw_cm_disconn - schedule disconnect work when a connection is being closed
+ * @iwqp: associate qp for the connection
+ */
+int i40iw_cm_disconn(struct i40iw_qp *iwqp)
+{
+       struct disconn_work *work;
+       struct i40iw_device *iwdev = iwqp->iwdev;
+       struct i40iw_cm_core *cm_core = &iwdev->cm_core;
+
+       work = kzalloc(sizeof(*work), GFP_ATOMIC);
+       if (!work)
+               return -ENOMEM; /* Timer will clean up */
+
+       i40iw_add_ref(&iwqp->ibqp);
+       work->iwqp = iwqp;
+       INIT_WORK(&work->work, i40iw_disconnect_worker);
+       queue_work(cm_core->disconn_wq, &work->work);
+       return 0;
+}
+
+/**
+ * i40iw_loopback_nop - Send a nop
+ * @qp: associated hw qp
+ */
+static void i40iw_loopback_nop(struct i40iw_sc_qp *qp)
+{
+       u64 *wqe;
+       u64 header;
+
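+       /* build a NOP WQE in the first SQ slot; no completion is requested */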
+       wqe = qp->qp_uk.sq_base->elem;
+       set_64bit_val(wqe, 0, 0);
+       set_64bit_val(wqe, 8, 0);
+       set_64bit_val(wqe, 16, 0);
+
+       header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
+           LS_64(0, I40IWQPSQ_SIGCOMPL) |
+           LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
+       set_64bit_val(wqe, 24, header);
+}
+
+/**
+ * i40iw_qp_disconnect - free qp and close cm
+ * @iwqp: associate qp for the connection
+ */
+static void i40iw_qp_disconnect(struct i40iw_qp *iwqp)
+{
+       struct i40iw_device *iwdev;
+       struct i40iw_ib_device *iwibdev;
+
+       iwdev = to_iwdev(iwqp->ibqp.device);
+       if (!iwdev) {
+               i40iw_pr_err("iwdev == NULL\n");
+               return;
+       }
+
+       iwibdev = iwdev->iwibdev;
+
+       if (iwqp->active_conn) {
+               /* indicate this connection is NOT active */
+               iwqp->active_conn = 0;
+       } else {
+               /* Need to free the Last Streaming Mode Message */
+               if (iwqp->ietf_mem.va) {
+                       if (iwqp->lsmm_mr)
+                               iwibdev->ibdev.dereg_mr(iwqp->lsmm_mr);
+                       i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->ietf_mem);
+               }
+       }
+
+       /* close the CM node down if it is still active */
+       if (iwqp->cm_node) {
+               i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM, "%s Call close API\n", __func__);
+               i40iw_cm_close(iwqp->cm_node);
+       }
+}
+
+/**
+ * i40iw_cm_disconn_true - called by worker thread to disconnect qp
+ * @iwqp: associate qp for the connection
+ */
+static void i40iw_cm_disconn_true(struct i40iw_qp *iwqp)
+{
+       struct iw_cm_id *cm_id;
+       struct i40iw_device *iwdev;
+       struct i40iw_sc_qp *qp = &iwqp->sc_qp;
+       u16 last_ae;
+       u8 original_hw_tcp_state;
+       u8 original_ibqp_state;
+       int disconn_status = 0;
+       int issue_disconn = 0;
+       int issue_close = 0;
+       int issue_flush = 0;
+       struct ib_event ibevent;
+       unsigned long flags;
+       int ret;
+
+       if (!iwqp) {
+               i40iw_pr_err("iwqp == NULL\n");
+               return;
+       }
+
+       spin_lock_irqsave(&iwqp->lock, flags);
+       cm_id = iwqp->cm_id;
+       /* make sure we haven't already closed this connection */
+       if (!cm_id) {
+               spin_unlock_irqrestore(&iwqp->lock, flags);
+               return;
+       }
+
+       iwdev = to_iwdev(iwqp->ibqp.device);
+
+       original_hw_tcp_state = iwqp->hw_tcp_state;
+       original_ibqp_state = iwqp->ibqp_state;
+       last_ae = iwqp->last_aeq;
+
+       if (qp->term_flags) {
+               issue_disconn = 1;
+               issue_close = 1;
+               iwqp->cm_id = NULL;
+               /* When term timer expires after cm_timer, don't want
+                * terminate-handler to issue cm_disconn which can re-free
+                * a QP even after its refcnt=0.
+                */
+               del_timer(&iwqp->terminate_timer);
+               if (!iwqp->flush_issued) {
+                       iwqp->flush_issued = 1;
+                       issue_flush = 1;
+               }
+       } else if ((original_hw_tcp_state == I40IW_TCP_STATE_CLOSE_WAIT) ||
+                  ((original_ibqp_state == IB_QPS_RTS) &&
+                   (last_ae == I40IW_AE_LLP_CONNECTION_RESET))) {
+               issue_disconn = 1;
+               if (last_ae == I40IW_AE_LLP_CONNECTION_RESET)
+                       disconn_status = -ECONNRESET;
+       }
+
+       if (((original_hw_tcp_state == I40IW_TCP_STATE_CLOSED) ||
+            (original_hw_tcp_state == I40IW_TCP_STATE_TIME_WAIT) ||
+            (last_ae == I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE) ||
+            (last_ae == I40IW_AE_LLP_CONNECTION_RESET))) {
+               issue_close = 1;
+               iwqp->cm_id = NULL;
+               if (!iwqp->flush_issued) {
+                       iwqp->flush_issued = 1;
+                       issue_flush = 1;
+               }
+       }
+
+       spin_unlock_irqrestore(&iwqp->lock, flags);
+       if (issue_flush && !iwqp->destroyed) {
+               /* Flush the queues */
+               i40iw_flush_wqes(iwdev, iwqp);
+
+               if (qp->term_flags) {
+                       ibevent.device = iwqp->ibqp.device;
+                       ibevent.event = (qp->eventtype == TERM_EVENT_QP_FATAL) ?
+                                       IB_EVENT_QP_FATAL : IB_EVENT_QP_ACCESS_ERR;
+                       ibevent.element.qp = &iwqp->ibqp;
+                       iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context);
+               }
+       }
+
+       if (cm_id && cm_id->event_handler) {
+               if (issue_disconn) {
+                       ret = i40iw_send_cm_event(NULL,
+                                                 cm_id,
+                                                 IW_CM_EVENT_DISCONNECT,
+                                                 disconn_status);
+
+                       if (ret)
+                               i40iw_debug(&iwdev->sc_dev,
+                                           I40IW_DEBUG_CM,
+                                           "disconnect event failed %s: - cm_id = %p\n",
+                                           __func__, cm_id);
+               }
+               if (issue_close) {
+                       i40iw_qp_disconnect(iwqp);
+                       cm_id->provider_data = iwqp;
+                       ret = i40iw_send_cm_event(NULL, cm_id, IW_CM_EVENT_CLOSE, 0);
+                       if (ret)
+                               i40iw_debug(&iwdev->sc_dev,
+                                           I40IW_DEBUG_CM,
+                                           "close event failed %s: - cm_id = %p\n",
+                                           __func__, cm_id);
+                       cm_id->rem_ref(cm_id);
+               }
+       }
+}
+
+/**
+ * i40iw_disconnect_worker - worker for connection close
+ * @work: points to disconn structure
+ */
+static void i40iw_disconnect_worker(struct work_struct *work)
+{
+       struct disconn_work *dwork = container_of(work, struct disconn_work, work);
+       struct i40iw_qp *iwqp = dwork->iwqp;
+
+       kfree(dwork);
+       i40iw_cm_disconn_true(iwqp);
+       i40iw_rem_ref(&iwqp->ibqp);
+}
+
+/**
+ * i40iw_accept - registered call for connection to be accepted
+ * @cm_id: cm information for passive connection
+ * @conn_param: accept parameters
+ */
+int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+{
+       struct ib_qp *ibqp;
+       struct i40iw_qp *iwqp;
+       struct i40iw_device *iwdev;
+       struct i40iw_sc_dev *dev;
+       struct i40iw_cm_node *cm_node;
+       struct ib_qp_attr attr;
+       int passive_state;
+       struct i40iw_ib_device *iwibdev;
+       struct ib_mr *ibmr;
+       struct i40iw_pd *iwpd;
+       u16 buf_len = 0;
+       struct i40iw_kmem_info accept;
+       enum i40iw_status_code status;
+       u64 tagged_offset;
+
+       memset(&attr, 0, sizeof(attr));
+       ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn);
+       if (!ibqp)
+               return -EINVAL;
+
+       iwqp = to_iwqp(ibqp);
+       iwdev = iwqp->iwdev;
+       dev = &iwdev->sc_dev;
+       cm_node = (struct i40iw_cm_node *)cm_id->provider_data;
+
+       if (((struct sockaddr_in *)&cm_id->local_addr)->sin_family == AF_INET) {
+               cm_node->ipv4 = true;
+               cm_node->vlan_id = i40iw_get_vlan_ipv4(cm_node->loc_addr);
+       } else {
+               cm_node->ipv4 = false;
+               i40iw_netdev_vlan_ipv6(cm_node->loc_addr, &cm_node->vlan_id, NULL);
+       }
+       i40iw_debug(cm_node->dev,
+                   I40IW_DEBUG_CM,
+                   "Accept vlan_id=%d\n",
+                   cm_node->vlan_id);
+       if (cm_node->state == I40IW_CM_STATE_LISTENER_DESTROYED) {
+               if (cm_node->loopbackpartner)
+                       i40iw_rem_ref_cm_node(cm_node->loopbackpartner);
+               i40iw_rem_ref_cm_node(cm_node);
+               return -EINVAL;
+       }
+
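+       /* fail the accept if the peer has already reset this connection */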
+       passive_state = atomic_add_return(1, &cm_node->passive_state);
+       if (passive_state == I40IW_SEND_RESET_EVENT) {
+               i40iw_rem_ref_cm_node(cm_node);
+               return -ECONNRESET;
+       }
+
+       cm_node->cm_core->stats_accepts++;
+       iwqp->cm_node = (void *)cm_node;
+       cm_node->iwqp = iwqp;
+
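+       /* buffer holds the MPA reply frame, the private data and zero pad */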
+       buf_len = conn_param->private_data_len + I40IW_MAX_IETF_SIZE + MPA_ZERO_PAD_LEN;
+
+       status = i40iw_allocate_dma_mem(dev->hw, &iwqp->ietf_mem, buf_len, 1);
+
+       if (status)
+               return -ENOMEM;
+       cm_node->pdata.size = conn_param->private_data_len;
+       accept.addr = iwqp->ietf_mem.va;
+       accept.size = i40iw_cm_build_mpa_frame(cm_node, &accept, MPA_KEY_REPLY);
+       memcpy(accept.addr + accept.size, conn_param->private_data,
+              conn_param->private_data_len);
+
+       /* setup our first outgoing iWarp send WQE (the IETF frame response) */
+       if ((cm_node->ipv4 &&
+            !i40iw_ipv4_is_loopback(cm_node->loc_addr[0], cm_node->rem_addr[0])) ||
+           (!cm_node->ipv4 &&
+            !i40iw_ipv6_is_loopback(cm_node->loc_addr, cm_node->rem_addr))) {
+               iwibdev = iwdev->iwibdev;
+               iwpd = iwqp->iwpd;
+               tagged_offset = (uintptr_t)iwqp->ietf_mem.va;
+               ibmr = i40iw_reg_phys_mr(&iwpd->ibpd,
+                                        iwqp->ietf_mem.pa,
+                                        buf_len,
+                                        IB_ACCESS_LOCAL_WRITE,
+                                        &tagged_offset);
+               if (IS_ERR(ibmr)) {
+                       i40iw_free_dma_mem(dev->hw, &iwqp->ietf_mem);
+                       return -ENOMEM;
+               }
+
+               ibmr->pd = &iwpd->ibpd;
+               ibmr->device = iwpd->ibpd.device;
+               iwqp->lsmm_mr = ibmr;
+               if (iwqp->page)
+                       iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
+               if (is_remote_ne020_or_chelsio(cm_node))
+                       dev->iw_priv_qp_ops->qp_send_lsmm(
+                                                       &iwqp->sc_qp,
+                                                       iwqp->ietf_mem.va,
+                                                       (accept.size + conn_param->private_data_len),
+                                                       ibmr->lkey);
+               else
+                       dev->iw_priv_qp_ops->qp_send_lsmm(
+                                                       &iwqp->sc_qp,
+                                                       iwqp->ietf_mem.va,
+                                                       (accept.size + conn_param->private_data_len + MPA_ZERO_PAD_LEN),
+                                                       ibmr->lkey);
+
+       } else {
+               if (iwqp->page)
+                       iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
+               i40iw_loopback_nop(&iwqp->sc_qp);
+       }
+
+       if (iwqp->page)
+               kunmap(iwqp->page);
+
+       iwqp->cm_id = cm_id;
+       cm_node->cm_id = cm_id;
+
+       cm_id->provider_data = (void *)iwqp;
+       iwqp->active_conn = 0;
+
+       cm_node->lsmm_size = accept.size + conn_param->private_data_len;
+       i40iw_cm_init_tsa_conn(iwqp, cm_node);
+       cm_id->add_ref(cm_id);
+       i40iw_add_ref(&iwqp->ibqp);
+
+       i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_ESTABLISHED, 0);
+
+       attr.qp_state = IB_QPS_RTS;
+       cm_node->qhash_set = false;
+       i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
+       if (cm_node->loopbackpartner) {
+               cm_node->loopbackpartner->pdata.size = conn_param->private_data_len;
+
+               /* copy the private data into the loopback partner's buffer */
+               memcpy(cm_node->loopbackpartner->pdata_buf,
+                      conn_param->private_data,
+                      conn_param->private_data_len);
+               i40iw_create_event(cm_node->loopbackpartner, I40IW_CM_EVENT_CONNECTED);
+       }
+
+       cm_node->accelerated = 1;
+       if (cm_node->accept_pend) {
+               if (!cm_node->listener)
+                       i40iw_pr_err("cm_node->listener NULL for passive node\n");
+               atomic_dec(&cm_node->listener->pend_accepts_cnt);
+               cm_node->accept_pend = 0;
+       }
+       return 0;
+}
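+
+/*
+ * Note: the accept path above builds the MPA reply in DMA-able memory
+ * (iwqp->ietf_mem), registers it with i40iw_reg_phys_mr() and posts it as
+ * the first send WQE (LSMM), unless the connection is a local loopback, in
+ * which case a NOP WQE is posted instead.
+ */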
+
+/**
+ * i40iw_reject - registered call for connection to be rejected
+ * @cm_id: cm information for passive connection
+ * @pdata: private data to be sent
+ * @pdata_len: private data length
+ */
+int i40iw_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
+{
+       struct i40iw_device *iwdev;
+       struct i40iw_cm_node *cm_node;
+       struct i40iw_cm_node *loopback;
+
+       cm_node = (struct i40iw_cm_node *)cm_id->provider_data;
+       loopback = cm_node->loopbackpartner;
+       cm_node->cm_id = cm_id;
+       cm_node->pdata.size = pdata_len;
+
+       iwdev = to_iwdev(cm_id->device);
+       if (!iwdev)
+               return -EINVAL;
+       cm_node->cm_core->stats_rejects++;
+
+       if (pdata_len + sizeof(struct ietf_mpa_v2) > MAX_CM_BUFFER)
+               return -EINVAL;
+
+       if (loopback) {
+               memcpy(&loopback->pdata_buf, pdata, pdata_len);
+               loopback->pdata.size = pdata_len;
+       }
+
+       return i40iw_cm_reject(cm_node, pdata, pdata_len);
+}
+
+/**
+ * i40iw_connect - registered call for connection to be established
+ * @cm_id: cm information for the active connection
+ * @conn_param: Information about the connection
+ */
+int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+{
+       struct ib_qp *ibqp;
+       struct i40iw_qp *iwqp;
+       struct i40iw_device *iwdev;
+       struct i40iw_cm_node *cm_node;
+       struct i40iw_cm_info cm_info;
+       struct sockaddr_in *laddr;
+       struct sockaddr_in *raddr;
+       struct sockaddr_in6 *laddr6;
+       struct sockaddr_in6 *raddr6;
+       int apbvt_set = 0;
+       enum i40iw_status_code status;
+
+       ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn);
+       if (!ibqp)
+               return -EINVAL;
+       iwqp = to_iwqp(ibqp);
+       if (!iwqp)
+               return -EINVAL;
+       iwdev = to_iwdev(iwqp->ibqp.device);
+       if (!iwdev)
+               return -EINVAL;
+
+       laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+       raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
+       laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
+       raddr6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;
+
+       if (!(laddr->sin_port) || !(raddr->sin_port))
+               return -EINVAL;
+
+       iwqp->active_conn = 1;
+       iwqp->cm_id = NULL;
+       cm_id->provider_data = iwqp;
+
+       /* set up the connection params for the node */
+       if (cm_id->remote_addr.ss_family == AF_INET) {
+               cm_info.ipv4 = true;
+               memset(cm_info.loc_addr, 0, sizeof(cm_info.loc_addr));
+               memset(cm_info.rem_addr, 0, sizeof(cm_info.rem_addr));
+               cm_info.loc_addr[0] = ntohl(laddr->sin_addr.s_addr);
+               cm_info.rem_addr[0] = ntohl(raddr->sin_addr.s_addr);
+               cm_info.loc_port = ntohs(laddr->sin_port);
+               cm_info.rem_port = ntohs(raddr->sin_port);
+               cm_info.vlan_id = i40iw_get_vlan_ipv4(cm_info.loc_addr);
+       } else {
+               cm_info.ipv4 = false;
+               i40iw_copy_ip_ntohl(cm_info.loc_addr,
+                                   laddr6->sin6_addr.in6_u.u6_addr32);
+               i40iw_copy_ip_ntohl(cm_info.rem_addr,
+                                   raddr6->sin6_addr.in6_u.u6_addr32);
+               cm_info.loc_port = ntohs(laddr6->sin6_port);
+               cm_info.rem_port = ntohs(raddr6->sin6_port);
+               i40iw_netdev_vlan_ipv6(cm_info.loc_addr, &cm_info.vlan_id, NULL);
+       }
+       cm_info.cm_id = cm_id;
+       if ((cm_info.ipv4 && (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr)) ||
+           (!cm_info.ipv4 && memcmp(laddr6->sin6_addr.in6_u.u6_addr32,
+                                    raddr6->sin6_addr.in6_u.u6_addr32,
+                                    sizeof(laddr6->sin6_addr.in6_u.u6_addr32)))) {
+               status = i40iw_manage_qhash(iwdev,
+                                           &cm_info,
+                                           I40IW_QHASH_TYPE_TCP_ESTABLISHED,
+                                           I40IW_QHASH_MANAGE_TYPE_ADD,
+                                           NULL,
+                                           true);
+               if (status)
+                       return -EINVAL;
+       }
+       status = i40iw_manage_apbvt(iwdev, cm_info.loc_port, I40IW_MANAGE_APBVT_ADD);
+       if (status) {
+               i40iw_manage_qhash(iwdev,
+                                  &cm_info,
+                                  I40IW_QHASH_TYPE_TCP_ESTABLISHED,
+                                  I40IW_QHASH_MANAGE_TYPE_DELETE,
+                                  NULL,
+                                  false);
+               return -EINVAL;
+       }
+
+       apbvt_set = 1;
+       cm_id->add_ref(cm_id);
+       cm_node = i40iw_create_cm_node(&iwdev->cm_core, iwdev,
+                                      conn_param->private_data_len,
+                                      (void *)conn_param->private_data,
+                                      &cm_info);
+       if (!cm_node) {
+               i40iw_manage_qhash(iwdev,
+                                  &cm_info,
+                                  I40IW_QHASH_TYPE_TCP_ESTABLISHED,
+                                  I40IW_QHASH_MANAGE_TYPE_DELETE,
+                                  NULL,
+                                  false);
+
+               if (apbvt_set && !i40iw_listen_port_in_use(&iwdev->cm_core,
+                                                          cm_info.loc_port))
+                       i40iw_manage_apbvt(iwdev,
+                                          cm_info.loc_port,
+                                          I40IW_MANAGE_APBVT_DEL);
+               cm_id->rem_ref(cm_id);
+               iwdev->cm_core.stats_connect_errs++;
+               return -ENOMEM;
+       }
+
+       i40iw_record_ird_ord(cm_node, (u16)conn_param->ird, (u16)conn_param->ord);
+       if (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO &&
+           !cm_node->ord_size)
+               cm_node->ord_size = 1;
+
+       cm_node->apbvt_set = apbvt_set;
+       cm_node->qhash_set = true;
+       iwqp->cm_node = cm_node;
+       cm_node->iwqp = iwqp;
+       iwqp->cm_id = cm_id;
+       i40iw_add_ref(&iwqp->ibqp);
+       return 0;
+}
+
+/**
+ * i40iw_create_listen - registered call creating listener
+ * @cm_id: cm information for passive connection
+ * @backlog: maximum number of pending accepts
+ */
+int i40iw_create_listen(struct iw_cm_id *cm_id, int backlog)
+{
+       struct i40iw_device *iwdev;
+       struct i40iw_cm_listener *cm_listen_node;
+       struct i40iw_cm_info cm_info;
+       enum i40iw_status_code ret;
+       struct sockaddr_in *laddr;
+       struct sockaddr_in6 *laddr6;
+       bool wildcard = false;
+
+       iwdev = to_iwdev(cm_id->device);
+       if (!iwdev)
+               return -EINVAL;
+
+       laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+       laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
+       memset(&cm_info, 0, sizeof(cm_info));
+       if (laddr->sin_family == AF_INET) {
+               cm_info.ipv4 = true;
+               cm_info.loc_addr[0] = ntohl(laddr->sin_addr.s_addr);
+               cm_info.loc_port = ntohs(laddr->sin_port);
+
+               if (laddr->sin_addr.s_addr != INADDR_ANY)
+                       cm_info.vlan_id = i40iw_get_vlan_ipv4(cm_info.loc_addr);
+               else
+                       wildcard = true;
+
+       } else {
+               cm_info.ipv4 = false;
+               i40iw_copy_ip_ntohl(cm_info.loc_addr,
+                                   laddr6->sin6_addr.in6_u.u6_addr32);
+               cm_info.loc_port = ntohs(laddr6->sin6_port);
+               if (ipv6_addr_type(&laddr6->sin6_addr) != IPV6_ADDR_ANY)
+                       i40iw_netdev_vlan_ipv6(cm_info.loc_addr,
+                                              &cm_info.vlan_id,
+                                              NULL);
+               else
+                       wildcard = true;
+       }
+       cm_info.backlog = backlog;
+       cm_info.cm_id = cm_id;
+
+       cm_listen_node = i40iw_make_listen_node(&iwdev->cm_core, iwdev, &cm_info);
+       if (!cm_listen_node) {
+               i40iw_pr_err("cm_listen_node == NULL\n");
+               return -ENOMEM;
+       }
+
+       cm_id->provider_data = cm_listen_node;
+
+       if (!cm_listen_node->reused_node) {
+               if (wildcard) {
+                       if (cm_info.ipv4)
+                               ret = i40iw_add_mqh_4(iwdev,
+                                                     &cm_info,
+                                                     cm_listen_node);
+                       else
+                               ret = i40iw_add_mqh_6(iwdev,
+                                                     &cm_info,
+                                                     cm_listen_node);
+                       if (ret)
+                               goto error;
+
+                       ret = i40iw_manage_apbvt(iwdev,
+                                                cm_info.loc_port,
+                                                I40IW_MANAGE_APBVT_ADD);
+
+                       if (ret)
+                               goto error;
+               } else {
+                       ret = i40iw_manage_qhash(iwdev,
+                                                &cm_info,
+                                                I40IW_QHASH_TYPE_TCP_SYN,
+                                                I40IW_QHASH_MANAGE_TYPE_ADD,
+                                                NULL,
+                                                true);
+                       if (ret)
+                               goto error;
+                       cm_listen_node->qhash_set = true;
+                       ret = i40iw_manage_apbvt(iwdev,
+                                                cm_info.loc_port,
+                                                I40IW_MANAGE_APBVT_ADD);
+                       if (ret)
+                               goto error;
+               }
+       }
+       cm_id->add_ref(cm_id);
+       cm_listen_node->cm_core->stats_listen_created++;
+       return 0;
+ error:
+       i40iw_cm_del_listen(&iwdev->cm_core, (void *)cm_listen_node, false);
+       return -EINVAL;
+}
+
+/**
+ * i40iw_destroy_listen - registered call to destroy listener
+ * @cm_id: cm information for passive connection
+ */
+int i40iw_destroy_listen(struct iw_cm_id *cm_id)
+{
+       struct i40iw_device *iwdev;
+
+       iwdev = to_iwdev(cm_id->device);
+       if (cm_id->provider_data)
+               i40iw_cm_del_listen(&iwdev->cm_core, cm_id->provider_data, true);
+       else
+               i40iw_pr_err("cm_id->provider_data was NULL\n");
+
+       cm_id->rem_ref(cm_id);
+
+       return 0;
+}
+
+/**
+ * i40iw_cm_event_connected - handle connected active node
+ * @event: cm event info for the connected node
+ */
+static void i40iw_cm_event_connected(struct i40iw_cm_event *event)
+{
+       struct i40iw_qp *iwqp;
+       struct i40iw_device *iwdev;
+       struct i40iw_cm_node *cm_node;
+       struct i40iw_sc_dev *dev;
+       struct ib_qp_attr attr;
+       struct iw_cm_id *cm_id;
+       int status;
+       bool read0;
+
+       cm_node = event->cm_node;
+       cm_id = cm_node->cm_id;
+       iwqp = (struct i40iw_qp *)cm_id->provider_data;
+       iwdev = to_iwdev(iwqp->ibqp.device);
+       dev = &iwdev->sc_dev;
+
+       if (iwqp->destroyed) {
+               status = -ETIMEDOUT;
+               goto error;
+       }
+       i40iw_cm_init_tsa_conn(iwqp, cm_node);
+       read0 = (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO);
+       if (iwqp->page)
+               iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
+       dev->iw_priv_qp_ops->qp_send_rtt(&iwqp->sc_qp, read0);
+       if (iwqp->page)
+               kunmap(iwqp->page);
+       status = i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY, 0);
+       if (status)
+               i40iw_pr_err("send cm event\n");
+
+       memset(&attr, 0, sizeof(attr));
+       attr.qp_state = IB_QPS_RTS;
+       cm_node->qhash_set = false;
+       i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
+
+       cm_node->accelerated = 1;
+       if (cm_node->accept_pend) {
+               if (!cm_node->listener)
+                       i40iw_pr_err("listener is null for passive node\n");
+               atomic_dec(&cm_node->listener->pend_accepts_cnt);
+               cm_node->accept_pend = 0;
+       }
+       return;
+
+error:
+       iwqp->cm_id = NULL;
+       cm_id->provider_data = NULL;
+       i40iw_send_cm_event(event->cm_node,
+                           cm_id,
+                           IW_CM_EVENT_CONNECT_REPLY,
+                           status);
+       cm_id->rem_ref(cm_id);
+       i40iw_rem_ref_cm_node(event->cm_node);
+}
+
+/**
+ * i40iw_cm_event_reset - handle reset
+ * @event: cm event info for the node being reset
+ */
+static void i40iw_cm_event_reset(struct i40iw_cm_event *event)
+{
+       struct i40iw_cm_node *cm_node = event->cm_node;
+       struct iw_cm_id   *cm_id = cm_node->cm_id;
+       struct i40iw_qp *iwqp;
+
+       if (!cm_id)
+               return;
+
+       iwqp = cm_id->provider_data;
+       if (!iwqp)
+               return;
+
+       i40iw_debug(cm_node->dev,
+                   I40IW_DEBUG_CM,
+                   "reset event %p - cm_id = %p\n",
+                    event->cm_node, cm_id);
+       iwqp->cm_id = NULL;
+
+       i40iw_send_cm_event(cm_node, cm_node->cm_id, IW_CM_EVENT_DISCONNECT, -ECONNRESET);
+       i40iw_send_cm_event(cm_node, cm_node->cm_id, IW_CM_EVENT_CLOSE, 0);
+}
+
+/**
+ * i40iw_cm_event_handler - worker thread callback to send event to cm upper layer
+ * @work: work_struct embedded in the cm event info
+ */
+static void i40iw_cm_event_handler(struct work_struct *work)
+{
+       struct i40iw_cm_event *event = container_of(work,
+                                                   struct i40iw_cm_event,
+                                                   event_work);
+       struct i40iw_cm_node *cm_node;
+
+       if (!event || !event->cm_node || !event->cm_node->cm_core)
+               return;
+
+       cm_node = event->cm_node;
+
+       switch (event->type) {
+       case I40IW_CM_EVENT_MPA_REQ:
+               i40iw_send_cm_event(cm_node,
+                                   cm_node->cm_id,
+                                   IW_CM_EVENT_CONNECT_REQUEST,
+                                   0);
+               break;
+       case I40IW_CM_EVENT_RESET:
+               i40iw_cm_event_reset(event);
+               break;
+       case I40IW_CM_EVENT_CONNECTED:
+               if (!event->cm_node->cm_id ||
+                   (event->cm_node->state != I40IW_CM_STATE_OFFLOADED))
+                       break;
+               i40iw_cm_event_connected(event);
+               break;
+       case I40IW_CM_EVENT_MPA_REJECT:
+               if (!event->cm_node->cm_id ||
+                   (cm_node->state == I40IW_CM_STATE_OFFLOADED))
+                       break;
+               i40iw_send_cm_event(cm_node,
+                                   cm_node->cm_id,
+                                   IW_CM_EVENT_CONNECT_REPLY,
+                                   -ECONNREFUSED);
+               break;
+       case I40IW_CM_EVENT_ABORTED:
+               if (!event->cm_node->cm_id ||
+                   (event->cm_node->state == I40IW_CM_STATE_OFFLOADED))
+                       break;
+               i40iw_event_connect_error(event);
+               break;
+       default:
+               i40iw_pr_err("event type = %d\n", event->type);
+               break;
+       }
+
+       event->cm_info.cm_id->rem_ref(event->cm_info.cm_id);
+       i40iw_rem_ref_cm_node(event->cm_node);
+       kfree(event);
+}
+
+/**
+ * i40iw_cm_post_event - queue event request for worker thread
+ * @event: cm event to queue for the worker thread
+ */
+static void i40iw_cm_post_event(struct i40iw_cm_event *event)
+{
+       atomic_inc(&event->cm_node->ref_count);
+       event->cm_info.cm_id->add_ref(event->cm_info.cm_id);
+       INIT_WORK(&event->event_work, i40iw_cm_event_handler);
+
+       queue_work(event->cm_node->cm_core->event_wq, &event->event_work);
+}
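+
+/*
+ * Lifetime note: i40iw_cm_post_event() takes a reference on both the cm_node
+ * and the cm_id before queueing the work item; i40iw_cm_event_handler()
+ * drops both references and frees the event once the upper layer has been
+ * notified.
+ */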
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.h b/drivers/infiniband/hw/i40iw/i40iw_cm.h
new file mode 100644 (file)
index 0000000..5f8ceb4
--- /dev/null
@@ -0,0 +1,456 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015 Intel Corporation.  All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses.  You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+*   Redistribution and use in source and binary forms, with or
+*   without modification, are permitted provided that the following
+*   conditions are met:
+*
+*    - Redistributions of source code must retain the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer.
+*
+*    - Redistributions in binary form must reproduce the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer in the documentation and/or other materials
+*      provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_CM_H
+#define I40IW_CM_H
+
+#define QUEUE_EVENTS
+
+#define I40IW_MANAGE_APBVT_DEL 0
+#define I40IW_MANAGE_APBVT_ADD 1
+
+#define I40IW_MPA_REQUEST_ACCEPT  1
+#define I40IW_MPA_REQUEST_REJECT  2
+
+/* IETF MPA -- defines, enums, structs */
+#define IEFT_MPA_KEY_REQ  "MPA ID Req Frame"
+#define IEFT_MPA_KEY_REP  "MPA ID Rep Frame"
+#define IETF_MPA_KEY_SIZE 16
+#define IETF_MPA_VERSION  1
+#define IETF_MAX_PRIV_DATA_LEN 512
+#define IETF_MPA_FRAME_SIZE    20
+#define IETF_RTR_MSG_SIZE      4
+#define IETF_MPA_V2_FLAG       0x10
+#define SNDMARKER_SEQNMASK     0x000001FF
+
+#define I40IW_MAX_IETF_SIZE      32
+
+#define MPA_ZERO_PAD_LEN       4
+
+/* IETF RTR MSG Fields               */
+#define IETF_PEER_TO_PEER       0x8000
+#define IETF_FLPDU_ZERO_LEN     0x4000
+#define IETF_RDMA0_WRITE        0x8000
+#define IETF_RDMA0_READ         0x4000
+#define IETF_NO_IRD_ORD         0x3FFF
+
+/* HW-supported IRD sizes */
+#define        I40IW_HW_IRD_SETTING_2  2
+#define        I40IW_HW_IRD_SETTING_4  4
+#define        I40IW_HW_IRD_SETTING_8  8
+#define        I40IW_HW_IRD_SETTING_16 16
+#define        I40IW_HW_IRD_SETTING_32 32
+#define        I40IW_HW_IRD_SETTING_64 64
+
+enum ietf_mpa_flags {
+       IETF_MPA_FLAGS_MARKERS = 0x80,  /* receive Markers */
+       IETF_MPA_FLAGS_CRC = 0x40,      /* receive CRC */
+       IETF_MPA_FLAGS_REJECT = 0x20,   /* Reject */
+};
+
+struct ietf_mpa_v1 {
+       u8 key[IETF_MPA_KEY_SIZE];
+       u8 flags;
+       u8 rev;
+       __be16 priv_data_len;
+       u8 priv_data[0];
+};
+
+#define ietf_mpa_req_resp_frame ietf_mpa_frame
+
+struct ietf_rtr_msg {
+       __be16 ctrl_ird;
+       __be16 ctrl_ord;
+};
+
+struct ietf_mpa_v2 {
+       u8 key[IETF_MPA_KEY_SIZE];
+       u8 flags;
+       u8 rev;
+       __be16 priv_data_len;
+       struct ietf_rtr_msg rtr_msg;
+       u8 priv_data[0];
+};
+
+struct i40iw_cm_node;
+enum i40iw_timer_type {
+       I40IW_TIMER_TYPE_SEND,
+       I40IW_TIMER_TYPE_RECV,
+       I40IW_TIMER_NODE_CLEANUP,
+       I40IW_TIMER_TYPE_CLOSE,
+};
+
+#define I40IW_PASSIVE_STATE_INDICATED    0
+#define I40IW_DO_NOT_SEND_RESET_EVENT    1
+#define I40IW_SEND_RESET_EVENT           2
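+/*
+ * Note: i40iw_accept() bumps cm_node->passive_state with atomic_add_return()
+ * and returns -ECONNRESET once the count reaches I40IW_SEND_RESET_EVENT,
+ * i.e. a reset has already been indicated for the passive node.
+ */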
+
+#define MAX_I40IW_IFS 4
+
+#define SET_ACK 0x1
+#define SET_SYN 0x2
+#define SET_FIN 0x4
+#define SET_RST 0x8
+
+#define TCP_OPTIONS_PADDING     3
+
+struct option_base {
+       u8 optionnum;
+       u8 length;
+};
+
+enum option_numbers {
+       OPTION_NUMBER_END,
+       OPTION_NUMBER_NONE,
+       OPTION_NUMBER_MSS,
+       OPTION_NUMBER_WINDOW_SCALE,
+       OPTION_NUMBER_SACK_PERM,
+       OPTION_NUMBER_SACK,
+       OPTION_NUMBER_WRITE0 = 0xbc
+};
+
+struct option_mss {
+       u8 optionnum;
+       u8 length;
+       __be16 mss;
+};
+
+struct option_windowscale {
+       u8 optionnum;
+       u8 length;
+       u8 shiftcount;
+};
+
+union all_known_options {
+       char as_end;
+       struct option_base as_base;
+       struct option_mss as_mss;
+       struct option_windowscale as_windowscale;
+};
+
+struct i40iw_timer_entry {
+       struct list_head list;
+       unsigned long timetosend;       /* jiffies */
+       struct i40iw_puda_buf *sqbuf;
+       u32 type;
+       u32 retrycount;
+       u32 retranscount;
+       u32 context;
+       u32 send_retrans;
+       int close_when_complete;
+};
+
+#define I40IW_DEFAULT_RETRYS   64
+#define I40IW_DEFAULT_RETRANS  8
+#define I40IW_DEFAULT_TTL      0x40
+#define I40IW_DEFAULT_RTT_VAR  0x6
+#define I40IW_DEFAULT_SS_THRESH 0x3FFFFFFF
+#define I40IW_DEFAULT_REXMIT_THRESH 8
+
+#define I40IW_RETRY_TIMEOUT   HZ
+#define I40IW_SHORT_TIME      10
+#define I40IW_LONG_TIME       (2 * HZ)
+#define I40IW_MAX_TIMEOUT     ((unsigned long)(12 * HZ))
+
+#define I40IW_CM_HASHTABLE_SIZE         1024
+#define I40IW_CM_TCP_TIMER_INTERVAL     3000
+#define I40IW_CM_DEFAULT_MTU            1540
+#define I40IW_CM_DEFAULT_FRAME_CNT      10
+#define I40IW_CM_THREAD_STACK_SIZE      256
+#define I40IW_CM_DEFAULT_RCV_WND        64240
+#define I40IW_CM_DEFAULT_RCV_WND_SCALED 0x3fffc
+#define I40IW_CM_DEFAULT_RCV_WND_SCALE  2
+#define I40IW_CM_DEFAULT_FREE_PKTS      0x000A
+#define I40IW_CM_FREE_PKT_LO_WATERMARK  2
+
+#define I40IW_CM_DEFAULT_MSS   536
+
+#define I40IW_CM_DEF_SEQ       0x159bf75f
+#define I40IW_CM_DEF_LOCAL_ID  0x3b47
+
+#define I40IW_CM_DEF_SEQ2      0x18ed5740
+#define I40IW_CM_DEF_LOCAL_ID2 0xb807
+#define MAX_CM_BUFFER   (I40IW_MAX_IETF_SIZE + IETF_MAX_PRIV_DATA_LEN)
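+/* i40iw_reject() limits pdata_len + sizeof(struct ietf_mpa_v2) to this value */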
+
+typedef u32 i40iw_addr_t;
+
+#define i40iw_cm_tsa_context i40iw_qp_context
+
+struct i40iw_qp;
+
+/* cm node transition states */
+enum i40iw_cm_node_state {
+       I40IW_CM_STATE_UNKNOWN,
+       I40IW_CM_STATE_INITED,
+       I40IW_CM_STATE_LISTENING,
+       I40IW_CM_STATE_SYN_RCVD,
+       I40IW_CM_STATE_SYN_SENT,
+       I40IW_CM_STATE_ONE_SIDE_ESTABLISHED,
+       I40IW_CM_STATE_ESTABLISHED,
+       I40IW_CM_STATE_ACCEPTING,
+       I40IW_CM_STATE_MPAREQ_SENT,
+       I40IW_CM_STATE_MPAREQ_RCVD,
+       I40IW_CM_STATE_MPAREJ_RCVD,
+       I40IW_CM_STATE_OFFLOADED,
+       I40IW_CM_STATE_FIN_WAIT1,
+       I40IW_CM_STATE_FIN_WAIT2,
+       I40IW_CM_STATE_CLOSE_WAIT,
+       I40IW_CM_STATE_TIME_WAIT,
+       I40IW_CM_STATE_LAST_ACK,
+       I40IW_CM_STATE_CLOSING,
+       I40IW_CM_STATE_LISTENER_DESTROYED,
+       I40IW_CM_STATE_CLOSED
+};
+
+enum mpa_frame_version {
+       IETF_MPA_V1 = 1,
+       IETF_MPA_V2 = 2
+};
+
+enum mpa_frame_key {
+       MPA_KEY_REQUEST,
+       MPA_KEY_REPLY
+};
+
+enum send_rdma0 {
+       SEND_RDMA_READ_ZERO = 1,
+       SEND_RDMA_WRITE_ZERO = 2
+};
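+/*
+ * Note: i40iw_connect() forces ord_size to at least 1 when the RDMA0
+ * operation is SEND_RDMA_READ_ZERO, and i40iw_cm_event_connected() uses the
+ * same choice as the read0 flag passed to qp_send_rtt().
+ */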
+
+enum i40iw_tcpip_pkt_type {
+       I40IW_PKT_TYPE_UNKNOWN,
+       I40IW_PKT_TYPE_SYN,
+       I40IW_PKT_TYPE_SYNACK,
+       I40IW_PKT_TYPE_ACK,
+       I40IW_PKT_TYPE_FIN,
+       I40IW_PKT_TYPE_RST
+};
+
+/* CM context params */
+struct i40iw_cm_tcp_context {
+       u8 client;
+
+       u32 loc_seq_num;
+       u32 loc_ack_num;
+       u32 rem_ack_num;
+       u32 rcv_nxt;
+
+       u32 loc_id;
+       u32 rem_id;
+
+       u32 snd_wnd;
+       u32 max_snd_wnd;
+
+       u32 rcv_wnd;
+       u32 mss;
+       u8 snd_wscale;
+       u8 rcv_wscale;
+
+       struct timeval sent_ts;
+};
+
+enum i40iw_cm_listener_state {
+       I40IW_CM_LISTENER_PASSIVE_STATE = 1,
+       I40IW_CM_LISTENER_ACTIVE_STATE = 2,
+       I40IW_CM_LISTENER_EITHER_STATE = 3
+};
+
+struct i40iw_cm_listener {
+       struct list_head list;
+       struct i40iw_cm_core *cm_core;
+       u8 loc_mac[ETH_ALEN];
+       u32 loc_addr[4];
+       u16 loc_port;
+       u32 map_loc_addr[4];
+       u16 map_loc_port;
+       struct iw_cm_id *cm_id;
+       atomic_t ref_count;
+       struct i40iw_device *iwdev;
+       atomic_t pend_accepts_cnt;
+       int backlog;
+       enum i40iw_cm_listener_state listener_state;
+       u32 reused_node;
+       u8 user_pri;
+       u16 vlan_id;
+       bool qhash_set;
+       bool ipv4;
+       struct list_head child_listen_list;
+};
+
+struct i40iw_kmem_info {
+       void *addr;
+       u32 size;
+};
+
+/* per connection node and node state information */
+struct i40iw_cm_node {
+       u32 loc_addr[4], rem_addr[4];
+       u16 loc_port, rem_port;
+       u32 map_loc_addr[4], map_rem_addr[4];
+       u16 map_loc_port, map_rem_port;
+       u16 vlan_id;
+       enum i40iw_cm_node_state state;
+       u8 loc_mac[ETH_ALEN];
+       u8 rem_mac[ETH_ALEN];
+       atomic_t ref_count;
+       struct i40iw_qp *iwqp;
+       struct i40iw_device *iwdev;
+       struct i40iw_sc_dev *dev;
+       struct i40iw_cm_tcp_context tcp_cntxt;
+       struct i40iw_cm_core *cm_core;
+       struct i40iw_cm_node *loopbackpartner;
+       struct i40iw_timer_entry *send_entry;
+       struct i40iw_timer_entry *close_entry;
+       spinlock_t retrans_list_lock; /* cm transmit packet */
+       enum send_rdma0 send_rdma0_op;
+       u16 ird_size;
+       u16 ord_size;
+       u16     mpav2_ird_ord;
+       struct iw_cm_id *cm_id;
+       struct list_head list;
+       int accelerated;
+       struct i40iw_cm_listener *listener;
+       int apbvt_set;
+       int accept_pend;
+       struct list_head timer_entry;
+       struct list_head reset_entry;
+       atomic_t passive_state;
+       bool qhash_set;
+       u8 user_pri;
+       bool ipv4;
+       bool snd_mark_en;
+       u16 lsmm_size;
+       enum mpa_frame_version mpa_frame_rev;
+       struct i40iw_kmem_info pdata;
+       union {
+               struct ietf_mpa_v1 mpa_frame;
+               struct ietf_mpa_v2 mpa_v2_frame;
+       };
+
+       u8 pdata_buf[IETF_MAX_PRIV_DATA_LEN];
+       struct i40iw_kmem_info mpa_hdr;
+};
+
+/* structure for client or CM to fill when making CM api calls. */
+/*     - only need to set relevant data, based on op. */
+struct i40iw_cm_info {
+       struct iw_cm_id *cm_id;
+       u16 loc_port;
+       u16 rem_port;
+       u32 loc_addr[4];
+       u32 rem_addr[4];
+       u16 map_loc_port;
+       u16 map_rem_port;
+       u32 map_loc_addr[4];
+       u32 map_rem_addr[4];
+       u16 vlan_id;
+       int backlog;
+       u16 user_pri;
+       bool ipv4;
+};
+
+/* CM event codes */
+enum i40iw_cm_event_type {
+       I40IW_CM_EVENT_UNKNOWN,
+       I40IW_CM_EVENT_ESTABLISHED,
+       I40IW_CM_EVENT_MPA_REQ,
+       I40IW_CM_EVENT_MPA_CONNECT,
+       I40IW_CM_EVENT_MPA_ACCEPT,
+       I40IW_CM_EVENT_MPA_REJECT,
+       I40IW_CM_EVENT_MPA_ESTABLISHED,
+       I40IW_CM_EVENT_CONNECTED,
+       I40IW_CM_EVENT_RESET,
+       I40IW_CM_EVENT_ABORTED
+};
+
+/* event to post to CM event handler */
+struct i40iw_cm_event {
+       enum i40iw_cm_event_type type;
+       struct i40iw_cm_info cm_info;
+       struct work_struct event_work;
+       struct i40iw_cm_node *cm_node;
+};
+
+struct i40iw_cm_core {
+       struct i40iw_device *iwdev;
+       struct i40iw_sc_dev *dev;
+
+       struct list_head listen_nodes;
+       struct list_head connected_nodes;
+
+       struct timer_list tcp_timer;
+
+       struct workqueue_struct *event_wq;
+       struct workqueue_struct *disconn_wq;
+
+       spinlock_t ht_lock; /* manage hash table */
+       spinlock_t listen_list_lock; /* listen list */
+
+       u64     stats_nodes_created;
+       u64     stats_nodes_destroyed;
+       u64     stats_listen_created;
+       u64     stats_listen_destroyed;
+       u64     stats_listen_nodes_created;
+       u64     stats_listen_nodes_destroyed;
+       u64     stats_loopbacks;
+       u64     stats_accepts;
+       u64     stats_rejects;
+       u64     stats_connect_errs;
+       u64     stats_passive_errs;
+       u64     stats_pkt_retrans;
+       u64     stats_backlog_drops;
+};
+
+int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
+                           struct i40iw_puda_buf *sqbuf,
+                           enum i40iw_timer_type type,
+                           int send_retrans,
+                           int close_when_complete);
+
+int i40iw_accept(struct iw_cm_id *, struct iw_cm_conn_param *);
+int i40iw_reject(struct iw_cm_id *, const void *, u8);
+int i40iw_connect(struct iw_cm_id *, struct iw_cm_conn_param *);
+int i40iw_create_listen(struct iw_cm_id *, int);
+int i40iw_destroy_listen(struct iw_cm_id *);
+
+int i40iw_cm_start(struct i40iw_device *);
+int i40iw_cm_stop(struct i40iw_device *);
+
+int i40iw_arp_table(struct i40iw_device *iwdev,
+                   u32 *ip_addr,
+                   bool ipv4,
+                   u8 *mac_addr,
+                   u32 action);
+
+#endif /* I40IW_CM_H */
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
new file mode 100644 (file)
index 0000000..f05802b
--- /dev/null
@@ -0,0 +1,4743 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses.  You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+*   Redistribution and use in source and binary forms, with or
+*   without modification, are permitted provided that the following
+*   conditions are met:
+*
+*    - Redistributions of source code must retain the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer.
+*
+*    - Redistributions in binary form must reproduce the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer in the documentation and/or other materials
+*      provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#include "i40iw_osdep.h"
+#include "i40iw_register.h"
+#include "i40iw_status.h"
+#include "i40iw_hmc.h"
+
+#include "i40iw_d.h"
+#include "i40iw_type.h"
+#include "i40iw_p.h"
+#include "i40iw_vf.h"
+#include "i40iw_virtchnl.h"
+
+/**
+ * i40iw_insert_wqe_hdr - write wqe header
+ * @wqe: cqp wqe for header
+ * @header: header for the cqp wqe
+ */
+static inline void i40iw_insert_wqe_hdr(u64 *wqe, u64 header)
+{
+       wmb();            /* make sure WQE is populated before polarity is set */
+       set_64bit_val(wqe, 24, header);
+}
+
+/**
+ * i40iw_get_cqp_reg_info - get head and tail for cqp using registers
+ * @cqp: struct for cqp hw
+ * @val: cqp tail register value
+ * @tail: wqtail register value
+ * @error: cqp processing err
+ */
+static inline void i40iw_get_cqp_reg_info(struct i40iw_sc_cqp *cqp,
+                                         u32 *val,
+                                         u32 *tail,
+                                         u32 *error)
+{
+       if (cqp->dev->is_pf) {
+               *val = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPTAIL);
+               *tail = RS_32(*val, I40E_PFPE_CQPTAIL_WQTAIL);
+               *error = RS_32(*val, I40E_PFPE_CQPTAIL_CQP_OP_ERR);
+       } else {
+               *val = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPTAIL1);
+               *tail = RS_32(*val, I40E_VFPE_CQPTAIL_WQTAIL);
+               *error = RS_32(*val, I40E_VFPE_CQPTAIL_CQP_OP_ERR);
+       }
+}
+
+/**
+ * i40iw_cqp_poll_registers - poll cqp registers
+ * @cqp: struct for cqp hw
+ * @tail:wqtail register value
+ * @count: how many times to try for completion
+ */
+static enum i40iw_status_code i40iw_cqp_poll_registers(
+                                               struct i40iw_sc_cqp *cqp,
+                                               u32 tail,
+                                               u32 count)
+{
+       u32 i = 0;
+       u32 newtail, error, val;
+
+       while (i < count) {
+               i++;
+               i40iw_get_cqp_reg_info(cqp, &val, &newtail, &error);
+               if (error) {
+                       error = (cqp->dev->is_pf) ?
+                                i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPERRCODES) :
+                                i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPERRCODES1);
+                       return I40IW_ERR_CQP_COMPL_ERROR;
+               }
+               if (newtail != tail) {
+                       /* SUCCESS */
+                       I40IW_RING_MOVE_TAIL(cqp->sq_ring);
+                       return 0;
+               }
+               udelay(I40IW_SLEEP_COUNT);
+       }
+       return I40IW_ERR_TIMEOUT;
+}
+
+/**
+ * i40iw_sc_parse_fpm_commit_buf - parse fpm commit buffer
+ * @buf: ptr to fpm commit buffer
+ * @info: ptr to i40iw_hmc_obj_info struct
+ *
+ * parses fpm commit info and copy base value
+ * of hmc objects in hmc_info
+ */
+static enum i40iw_status_code i40iw_sc_parse_fpm_commit_buf(
+                               u64 *buf,
+                               struct i40iw_hmc_obj_info *info)
+{
+       u64 temp;
+       u32 i, j;
+       u32 low;
+
+       /* copy base values in obj_info */
+       for (i = I40IW_HMC_IW_QP, j = 0;
+                       i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
+               get_64bit_val(buf, j, &temp);
+               info[i].base = RS_64_1(temp, 32) * 512;
+               low = (u32)(temp);
+               if (low)
+                       info[i].cnt = low;
+       }
+       return 0;
+}
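+
+/*
+ * Layout note: each 8-byte commit entry carries the object count in its low
+ * 32 bits and the base (in 512-byte units) in its high 32 bits, which is why
+ * the upper word is scaled by 512 above.
+ */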
+
+/**
+ * i40iw_sc_parse_fpm_query_buf() - parses fpm query buffer
+ * @buf: ptr to fpm query buffer
+ * @info: ptr to i40iw_hmc_obj_info struct
+ * @hmc_fpm_misc: ptr to fpm data
+ *
+ * parses fpm query buffer and copy max_cnt and
+ * size value of hmc objects in hmc_info
+ */
+static enum i40iw_status_code i40iw_sc_parse_fpm_query_buf(
+                               u64 *buf,
+                               struct i40iw_hmc_info *hmc_info,
+                               struct i40iw_hmc_fpm_misc *hmc_fpm_misc)
+{
+       u64 temp;
+       struct i40iw_hmc_obj_info *obj_info;
+       u32 i, j, size;
+       u16 max_pe_sds;
+
+       obj_info = hmc_info->hmc_obj;
+
+       get_64bit_val(buf, 0, &temp);
+       hmc_info->first_sd_index = (u16)RS_64(temp, I40IW_QUERY_FPM_FIRST_PE_SD_INDEX);
+       max_pe_sds = (u16)RS_64(temp, I40IW_QUERY_FPM_MAX_PE_SDS);
+
+       /* Reduce SD count for VFs by 1 to account for PBLE backing page rounding */
+       if (hmc_info->hmc_fn_id >= I40IW_FIRST_VF_FPM_ID)
+               max_pe_sds--;
+       hmc_fpm_misc->max_sds = max_pe_sds;
+       hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index;
+
+       for (i = I40IW_HMC_IW_QP, j = 8;
+            i <= I40IW_HMC_IW_ARP; i++, j += 8) {
+               get_64bit_val(buf, j, &temp);
+               if (i == I40IW_HMC_IW_QP)
+                       obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
+               else if (i == I40IW_HMC_IW_CQ)
+                       obj_info[i].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
+               else
+                       obj_info[i].max_cnt = (u32)temp;
+
+               size = (u32)RS_64_1(temp, 32);
+               obj_info[i].size = ((u64)1 << size);
+       }
+       for (i = I40IW_HMC_IW_MR, j = 48;
+                       i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
+               get_64bit_val(buf, j, &temp);
+               obj_info[i].max_cnt = (u32)temp;
+               size = (u32)RS_64_1(temp, 32);
+               obj_info[i].size = LS_64_1(1, size);
+       }
+
+       get_64bit_val(buf, 120, &temp);
+       hmc_fpm_misc->max_ceqs = (u8)RS_64(temp, I40IW_QUERY_FPM_MAX_CEQS);
+       get_64bit_val(buf, 120, &temp);
+       hmc_fpm_misc->ht_multiplier = RS_64(temp, I40IW_QUERY_FPM_HTMULTIPLIER);
+       get_64bit_val(buf, 120, &temp);
+       hmc_fpm_misc->timer_bucket = RS_64(temp, I40IW_QUERY_FPM_TIMERBUCKET);
+       get_64bit_val(buf, 64, &temp);
+       hmc_fpm_misc->xf_block_size = RS_64(temp, I40IW_QUERY_FPM_XFBLOCKSIZE);
+       if (!hmc_fpm_misc->xf_block_size)
+               return I40IW_ERR_INVALID_SIZE;
+       get_64bit_val(buf, 80, &temp);
+       hmc_fpm_misc->q1_block_size = RS_64(temp, I40IW_QUERY_FPM_Q1BLOCKSIZE);
+       if (!hmc_fpm_misc->q1_block_size)
+               return I40IW_ERR_INVALID_SIZE;
+       return 0;
+}
+
+/**
+ * i40iw_sc_pd_init - initialize sc pd struct
+ * @dev: sc device struct
+ * @pd: sc pd ptr
+ * @pd_id: pd_id for allocated pd
+ */
+static void i40iw_sc_pd_init(struct i40iw_sc_dev *dev,
+                            struct i40iw_sc_pd *pd,
+                            u16 pd_id)
+{
+       pd->size = sizeof(*pd);
+       pd->pd_id = pd_id;
+       pd->dev = dev;
+}
+
+/**
+ * i40iw_get_encoded_wqe_size - given wq size, returns hardware encoded size
+ * @wqsize: size of the wq (sq, rq, srq) to be encoded
+ * @cqpsq: true for the cqp sq, whose encoded size starts at 1 instead of 0
+ */
+u8 i40iw_get_encoded_wqe_size(u32 wqsize, bool cqpsq)
+{
+       u8 encoded_size = 0;
+
+       /* cqp sq's hw coded value starts from 1 for size of 4
+        * while it starts from 0 for qp wqs.
+        */
+       if (cqpsq)
+               encoded_size = 1;
+       wqsize >>= 2;
+       while (wqsize >>= 1)
+               encoded_size++;
+       return encoded_size;
+}
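+
+/*
+ * Example (informational): wqsize 2048 encodes as log2(2048) - 2 = 9 for a
+ * qp wq and as 10 for the cqp sq; the minimum size of 4 encodes as 0 (qp)
+ * or 1 (cqp sq).
+ */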
+
+/**
+ * i40iw_sc_cqp_init - Initialize buffers for a control Queue Pair
+ * @cqp: IWARP control queue pair pointer
+ * @info: IWARP control queue pair init info pointer
+ *
+ * Initializes the object and context buffers for a control Queue Pair.
+ */
+static enum i40iw_status_code i40iw_sc_cqp_init(struct i40iw_sc_cqp *cqp,
+                                               struct i40iw_cqp_init_info *info)
+{
+       u8 hw_sq_size;
+
+       if ((info->sq_size > I40IW_CQP_SW_SQSIZE_2048) ||
+           (info->sq_size < I40IW_CQP_SW_SQSIZE_4) ||
+           ((info->sq_size & (info->sq_size - 1))))
+               return I40IW_ERR_INVALID_SIZE;
+
+       hw_sq_size = i40iw_get_encoded_wqe_size(info->sq_size, true);
+       cqp->size = sizeof(*cqp);
+       cqp->sq_size = info->sq_size;
+       cqp->hw_sq_size = hw_sq_size;
+       cqp->sq_base = info->sq;
+       cqp->host_ctx = info->host_ctx;
+       cqp->sq_pa = info->sq_pa;
+       cqp->host_ctx_pa = info->host_ctx_pa;
+       cqp->dev = info->dev;
+       cqp->struct_ver = info->struct_ver;
+       cqp->scratch_array = info->scratch_array;
+       cqp->polarity = 0;
+       cqp->en_datacenter_tcp = info->en_datacenter_tcp;
+       cqp->enabled_vf_count = info->enabled_vf_count;
+       cqp->hmc_profile = info->hmc_profile;
+       info->dev->cqp = cqp;
+
+       I40IW_RING_INIT(cqp->sq_ring, cqp->sq_size);
+       i40iw_debug(cqp->dev, I40IW_DEBUG_WQE,
+                   "%s: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%llxh] cqp[%p] polarity[x%04X]\n",
+                   __func__, cqp->sq_size, cqp->hw_sq_size,
+                   cqp->sq_base, cqp->sq_pa, cqp, cqp->polarity);
+       return 0;
+}
+
+/**
+ * i40iw_sc_cqp_create - create cqp during bringup
+ * @cqp: struct for cqp hw
+ * @disable_pfpdus: true to disable pfpdu processing
+ * @maj_err: If error, major err number
+ * @min_err: If error, minor err number
+ */
+static enum i40iw_status_code i40iw_sc_cqp_create(struct i40iw_sc_cqp *cqp,
+                                                 bool disable_pfpdus,
+                                                 u16 *maj_err,
+                                                 u16 *min_err)
+{
+       u64 temp;
+       u32 cnt = 0, p1, p2, val = 0, err_code;
+       enum i40iw_status_code ret_code;
+
+       ret_code = i40iw_allocate_dma_mem(cqp->dev->hw,
+                                         &cqp->sdbuf,
+                                         128,
+                                         I40IW_SD_BUF_ALIGNMENT);
+
+       if (ret_code)
+               goto exit;
+
+       temp = LS_64(cqp->hw_sq_size, I40IW_CQPHC_SQSIZE) |
+              LS_64(cqp->struct_ver, I40IW_CQPHC_SVER);
+
+       if (disable_pfpdus)
+               temp |= LS_64(1, I40IW_CQPHC_DISABLE_PFPDUS);
+
+       set_64bit_val(cqp->host_ctx, 0, temp);
+       set_64bit_val(cqp->host_ctx, 8, cqp->sq_pa);
+       temp = LS_64(cqp->enabled_vf_count, I40IW_CQPHC_ENABLED_VFS) |
+              LS_64(cqp->hmc_profile, I40IW_CQPHC_HMC_PROFILE);
+       set_64bit_val(cqp->host_ctx, 16, temp);
+       set_64bit_val(cqp->host_ctx, 24, (uintptr_t)cqp);
+       set_64bit_val(cqp->host_ctx, 32, 0);
+       set_64bit_val(cqp->host_ctx, 40, 0);
+       set_64bit_val(cqp->host_ctx, 48, 0);
+       set_64bit_val(cqp->host_ctx, 56, 0);
+
+       i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQP_HOST_CTX",
+                       cqp->host_ctx, I40IW_CQP_CTX_SIZE * 8);
+
+       p1 = RS_32_1(cqp->host_ctx_pa, 32);
+       p2 = (u32)cqp->host_ctx_pa;
+
+       if (cqp->dev->is_pf) {
+               i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPHIGH, p1);
+               i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPLOW, p2);
+       } else {
+               i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPHIGH1, p1);
+               i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPLOW1, p2);
+       }
+       do {
+               if (cnt++ > I40IW_DONE_COUNT) {
+                       i40iw_free_dma_mem(cqp->dev->hw, &cqp->sdbuf);
+                       ret_code = I40IW_ERR_TIMEOUT;
+                       /*
+                        * read PFPE_CQPERRORCODES register to get the minor
+                        * and major error code
+                        */
+                       if (cqp->dev->is_pf)
+                               err_code = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPERRCODES);
+                       else
+                               err_code = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPERRCODES1);
+                       *min_err = RS_32(err_code, I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE);
+                       *maj_err = RS_32(err_code, I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE);
+                       goto exit;
+               }
+               udelay(I40IW_SLEEP_COUNT);
+               if (cqp->dev->is_pf)
+                       val = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CCQPSTATUS);
+               else
+                       val = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CCQPSTATUS1);
+       } while (!val);
+
+exit:
+       if (!ret_code)
+               cqp->process_cqp_sds = i40iw_update_sds_noccq;
+       return ret_code;
+}
+
+/**
+ * i40iw_sc_cqp_post_sq - post of cqp's sq
+ * @cqp: struct for cqp hw
+ */
+void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp)
+{
+       if (cqp->dev->is_pf)
+               i40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPDB, I40IW_RING_GETCURRENT_HEAD(cqp->sq_ring));
+       else
+               i40iw_wr32(cqp->dev->hw, I40E_VFPE_CQPDB1, I40IW_RING_GETCURRENT_HEAD(cqp->sq_ring));
+
+       i40iw_debug(cqp->dev,
+                   I40IW_DEBUG_WQE,
+                   "%s: HEAD_TAIL[%04d,%04d,%04d]\n",
+                   __func__,
+                   cqp->sq_ring.head,
+                   cqp->sq_ring.tail,
+                   cqp->sq_ring.size);
+}
+
+/**
+ * i40iw_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
+ * @cqp: struct for cqp hw
+ * @scratch: private value stored with the wqe and returned on cqp completion
+ */
+u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
+{
+       u64 *wqe = NULL;
+       u32     wqe_idx;
+       enum i40iw_status_code ret_code;
+
+       if (I40IW_RING_FULL_ERR(cqp->sq_ring)) {
+               i40iw_debug(cqp->dev,
+                           I40IW_DEBUG_WQE,
+                           "%s: ring is full head %x tail %x size %x\n",
+                           __func__,
+                           cqp->sq_ring.head,
+                           cqp->sq_ring.tail,
+                           cqp->sq_ring.size);
+               return NULL;
+       }
+       I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, wqe_idx, ret_code);
+       if (ret_code)
+               return NULL;
+       if (!wqe_idx)
+               cqp->polarity = !cqp->polarity;
+
+       wqe = cqp->sq_base[wqe_idx].elem;
+       cqp->scratch_array[wqe_idx] = scratch;
+       I40IW_CQP_INIT_WQE(wqe);
+
+       return wqe;
+}
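+
+/*
+ * Note: the polarity flips each time the sq ring wraps (wqe_idx == 0), and
+ * callers stamp cqp->polarity into the WQEVALID bit of the wqe header, so
+ * the hardware can tell freshly written wqes from stale ones left over from
+ * the previous pass of the ring.
+ */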
+
+/**
+ * i40iw_sc_cqp_destroy - destroy cqp during close
+ * @cqp: struct for cqp hw
+ */
+static enum i40iw_status_code i40iw_sc_cqp_destroy(struct i40iw_sc_cqp *cqp)
+{
+       u32 cnt = 0, val = 1;
+       enum i40iw_status_code ret_code = 0;
+       u32 cqpstat_addr;
+
+       if (cqp->dev->is_pf) {
+               i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPHIGH, 0);
+               i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPLOW, 0);
+               cqpstat_addr = I40E_PFPE_CCQPSTATUS;
+       } else {
+               i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPHIGH1, 0);
+               i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPLOW1, 0);
+               cqpstat_addr = I40E_VFPE_CCQPSTATUS1;
+       }
+       do {
+               if (cnt++ > I40IW_DONE_COUNT) {
+                       ret_code = I40IW_ERR_TIMEOUT;
+                       break;
+               }
+               udelay(I40IW_SLEEP_COUNT);
+               val = i40iw_rd32(cqp->dev->hw, cqpstat_addr);
+       } while (val);
+
+       i40iw_free_dma_mem(cqp->dev->hw, &cqp->sdbuf);
+       return ret_code;
+}
+
+/**
+ * i40iw_sc_ccq_arm - enable intr for control cq
+ * @ccq: ccq sc struct
+ */
+static void i40iw_sc_ccq_arm(struct i40iw_sc_cq *ccq)
+{
+       u64 temp_val;
+       u16 sw_cq_sel;
+       u8 arm_next_se;
+       u8 arm_seq_num;
+
+       /* write to cq doorbell shadow area */
+       /* arm next se should always be zero */
+       get_64bit_val(ccq->cq_uk.shadow_area, 32, &temp_val);
+
+       sw_cq_sel = (u16)RS_64(temp_val, I40IW_CQ_DBSA_SW_CQ_SELECT);
+       arm_next_se = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_NEXT_SE);
+
+       arm_seq_num = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_SEQ_NUM);
+       arm_seq_num++;
+
+       temp_val = LS_64(arm_seq_num, I40IW_CQ_DBSA_ARM_SEQ_NUM) |
+                  LS_64(sw_cq_sel, I40IW_CQ_DBSA_SW_CQ_SELECT) |
+                  LS_64(arm_next_se, I40IW_CQ_DBSA_ARM_NEXT_SE) |
+                  LS_64(1, I40IW_CQ_DBSA_ARM_NEXT);
+
+       set_64bit_val(ccq->cq_uk.shadow_area, 32, temp_val);
+
+       wmb();       /* make sure shadow area is updated before arming */
+
+       if (ccq->dev->is_pf)
+               i40iw_wr32(ccq->dev->hw, I40E_PFPE_CQARM, ccq->cq_uk.cq_id);
+       else
+               i40iw_wr32(ccq->dev->hw, I40E_VFPE_CQARM1, ccq->cq_uk.cq_id);
+}
+
+/**
+ * i40iw_sc_ccq_get_cqe_info - get ccq's cq entry
+ * @ccq: ccq sc struct
+ * @info: completion q entry to return
+ */
+static enum i40iw_status_code i40iw_sc_ccq_get_cqe_info(
+                                       struct i40iw_sc_cq *ccq,
+                                       struct i40iw_ccq_cqe_info *info)
+{
+       u64 qp_ctx, temp, temp1;
+       u64 *cqe;
+       struct i40iw_sc_cqp *cqp;
+       u32 wqe_idx;
+       u8 polarity;
+       enum i40iw_status_code ret_code = 0;
+
+       if (ccq->cq_uk.avoid_mem_cflct)
+               cqe = (u64 *)I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(&ccq->cq_uk);
+       else
+               cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(&ccq->cq_uk);
+
+       get_64bit_val(cqe, 24, &temp);
+       polarity = (u8)RS_64(temp, I40IW_CQ_VALID);
+       if (polarity != ccq->cq_uk.polarity)
+               return I40IW_ERR_QUEUE_EMPTY;
+
+       get_64bit_val(cqe, 8, &qp_ctx);
+       cqp = (struct i40iw_sc_cqp *)(unsigned long)qp_ctx;
+       info->error = (bool)RS_64(temp, I40IW_CQ_ERROR);
+       info->min_err_code = (u16)RS_64(temp, I40IW_CQ_MINERR);
+       if (info->error) {
+               info->maj_err_code = (u16)RS_64(temp, I40IW_CQ_MAJERR);
+               info->min_err_code = (u16)RS_64(temp, I40IW_CQ_MINERR);
+       }
+       wqe_idx = (u32)RS_64(temp, I40IW_CQ_WQEIDX);
+       info->scratch = cqp->scratch_array[wqe_idx];
+
+       get_64bit_val(cqe, 16, &temp1);
+       info->op_ret_val = (u32)RS_64(temp1, I40IW_CCQ_OPRETVAL);
+       get_64bit_val(cqp->sq_base[wqe_idx].elem, 24, &temp1);
+       info->op_code = (u8)RS_64(temp1, I40IW_CQPSQ_OPCODE);
+       info->cqp = cqp;
+
+       /*  move the head for cq */
+       I40IW_RING_MOVE_HEAD(ccq->cq_uk.cq_ring, ret_code);
+       if (I40IW_RING_GETCURRENT_HEAD(ccq->cq_uk.cq_ring) == 0)
+               ccq->cq_uk.polarity ^= 1;
+
+       /* update cq tail in cq shadow memory also */
+       I40IW_RING_MOVE_TAIL(ccq->cq_uk.cq_ring);
+       set_64bit_val(ccq->cq_uk.shadow_area,
+                     0,
+                     I40IW_RING_GETCURRENT_HEAD(ccq->cq_uk.cq_ring));
+       wmb(); /* write shadow area before tail */
+       I40IW_RING_MOVE_TAIL(cqp->sq_ring);
+       return ret_code;
+}
+
+/**
+ * i40iw_sc_poll_for_cqp_op_done - Waits for last write to complete in CQP SQ
+ * @cqp: struct for cqp hw
+ * @op_code: cqp opcode for completion
+ * @info: completion q entry to return
+ */
+static enum i40iw_status_code i40iw_sc_poll_for_cqp_op_done(
+                                       struct i40iw_sc_cqp *cqp,
+                                       u8 op_code,
+                                       struct i40iw_ccq_cqe_info *compl_info)
+{
+       struct i40iw_ccq_cqe_info info;
+       struct i40iw_sc_cq *ccq;
+       enum i40iw_status_code ret_code = 0;
+       u32 cnt = 0;
+
+       memset(&info, 0, sizeof(info));
+       ccq = cqp->dev->ccq;
+       while (1) {
+               if (cnt++ > I40IW_DONE_COUNT)
+                       return I40IW_ERR_TIMEOUT;
+
+               if (i40iw_sc_ccq_get_cqe_info(ccq, &info)) {
+                       udelay(I40IW_SLEEP_COUNT);
+                       continue;
+               }
+
+               if (info.error) {
+                       ret_code = I40IW_ERR_CQP_COMPL_ERROR;
+                       break;
+               }
+               /* warn if the returned opcode does not match the requested one */
+               if (op_code != info.op_code) {
+                       i40iw_debug(cqp->dev, I40IW_DEBUG_WQE,
+                                   "%s: opcode mismatch for my op code 0x%x, returned opcode %x\n",
+                                   __func__, op_code, info.op_code);
+               }
+               /* success, exit out of the loop */
+               if (op_code == info.op_code)
+                       break;
+       }
+
+       if (compl_info)
+               memcpy(compl_info, &info, sizeof(*compl_info));
+
+       return ret_code;
+}
+
+/**
+ * i40iw_sc_manage_push_page - Handle push page
+ * @cqp: struct for cqp hw
+ * @info: push page info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_manage_push_page(
+                               struct i40iw_sc_cqp *cqp,
+                               struct i40iw_cqp_manage_push_page_info *info,
+                               u64 scratch,
+                               bool post_sq)
+{
+       u64 *wqe;
+       u64 header;
+
+       if (info->push_idx >= I40IW_MAX_PUSH_PAGE_COUNT)
+               return I40IW_ERR_INVALID_PUSH_PAGE_INDEX;
+
+       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       if (!wqe)
+               return I40IW_ERR_RING_FULL;
+
+       set_64bit_val(wqe, 16, info->qs_handle);
+
+       header = LS_64(info->push_idx, I40IW_CQPSQ_MPP_PPIDX) |
+                LS_64(I40IW_CQP_OP_MANAGE_PUSH_PAGES, I40IW_CQPSQ_OPCODE) |
+                LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
+                LS_64(info->free_page, I40IW_CQPSQ_MPP_FREE_PAGE);
+
+       i40iw_insert_wqe_hdr(wqe, header);
+
+       i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_PUSH_PAGES WQE",
+                       wqe, I40IW_CQP_WQE_SIZE * 8);
+
+       if (post_sq)
+               i40iw_sc_cqp_post_sq(cqp);
+       return 0;
+}
+
+/**
+ * i40iw_sc_manage_hmc_pm_func_table - manage hmc pm function table
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @vf_index: vf index for cqp
+ * @free_pm_fcn: true to free the pm function
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_manage_hmc_pm_func_table(
+                               struct i40iw_sc_cqp *cqp,
+                               u64 scratch,
+                               u8 vf_index,
+                               bool free_pm_fcn,
+                               bool post_sq)
+{
+       u64 *wqe;
+       u64 header;
+
+       if (vf_index >= I40IW_MAX_VF_PER_PF)
+               return I40IW_ERR_INVALID_VF_ID;
+       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       if (!wqe)
+               return I40IW_ERR_RING_FULL;
+
+       header = LS_64(vf_index, I40IW_CQPSQ_MHMC_VFIDX) |
+                LS_64(I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE, I40IW_CQPSQ_OPCODE) |
+                LS_64(free_pm_fcn, I40IW_CQPSQ_MHMC_FREEPMFN) |
+                LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+       i40iw_insert_wqe_hdr(wqe, header);
+       i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_HMC_PM_FUNC_TABLE WQE",
+                       wqe, I40IW_CQP_WQE_SIZE * 8);
+       if (post_sq)
+               i40iw_sc_cqp_post_sq(cqp);
+       return 0;
+}
+
+/**
+ * i40iw_sc_set_hmc_resource_profile - cqp wqe for hmc profile
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @hmc_profile_type: type of profile to set
+ * @vf_num: vf number for profile
+ * @post_sq: flag for cqp db to ring
+ * @poll_registers: flag to poll register for cqp completion
+ */
+static enum i40iw_status_code i40iw_sc_set_hmc_resource_profile(
+                               struct i40iw_sc_cqp *cqp,
+                               u64 scratch,
+                               u8 hmc_profile_type,
+                               u8 vf_num, bool post_sq,
+                               bool poll_registers)
+{
+       u64 *wqe;
+       u64 header;
+       u32 val, tail, error;
+       enum i40iw_status_code ret_code = 0;
+
+       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       if (!wqe)
+               return I40IW_ERR_RING_FULL;
+
+       set_64bit_val(wqe, 16,
+                     (LS_64(hmc_profile_type, I40IW_CQPSQ_SHMCRP_HMC_PROFILE) |
+                               LS_64(vf_num, I40IW_CQPSQ_SHMCRP_VFNUM)));
+
+       header = LS_64(I40IW_CQP_OP_SET_HMC_RESOURCE_PROFILE, I40IW_CQPSQ_OPCODE) |
+                      LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+       i40iw_insert_wqe_hdr(wqe, header);
+
+       i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "SET_HMC_RESOURCE_PROFILE WQE",
+                       wqe, I40IW_CQP_WQE_SIZE * 8);
+
+       i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
+       if (error)
+               return I40IW_ERR_CQP_COMPL_ERROR;
+
+       if (post_sq) {
+               i40iw_sc_cqp_post_sq(cqp);
+               if (poll_registers)
+                       ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000000);
+               else
+                       ret_code = i40iw_sc_poll_for_cqp_op_done(cqp,
+                                                                I40IW_CQP_OP_SHMC_PAGES_ALLOCATED,
+                                                                NULL);
+       }
+
+       return ret_code;
+}
+
+/**
+ * i40iw_sc_manage_hmc_pm_func_table_done - wait for cqp wqe completion for function table
+ * @cqp: struct for cqp hw
+ */
+static enum i40iw_status_code i40iw_sc_manage_hmc_pm_func_table_done(struct i40iw_sc_cqp *cqp)
+{
+       return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE, NULL);
+}
+
+/**
+ * i40iw_sc_commit_fpm_values_done - wait for cqp wqe completion for fpm commit
+ * @cqp: struct for cqp hw
+ */
+static enum i40iw_status_code i40iw_sc_commit_fpm_values_done(struct i40iw_sc_cqp *cqp)
+{
+       return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_COMMIT_FPM_VALUES, NULL);
+}
+
+/**
+ * i40iw_sc_commit_fpm_values - cqp wqe for commit fpm values
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @hmc_fn_id: hmc function id
+ * @commit_fpm_mem: memory for fpm values
+ * @post_sq: flag for cqp db to ring
+ * @wait_type: poll ccq or cqp registers for cqp completion
+ */
+static enum i40iw_status_code i40iw_sc_commit_fpm_values(
+                                       struct i40iw_sc_cqp *cqp,
+                                       u64 scratch,
+                                       u8 hmc_fn_id,
+                                       struct i40iw_dma_mem *commit_fpm_mem,
+                                       bool post_sq,
+                                       u8 wait_type)
+{
+       u64 *wqe;
+       u64 header;
+       u32 tail, val, error;
+       enum i40iw_status_code ret_code = 0;
+
+       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       if (!wqe)
+               return I40IW_ERR_RING_FULL;
+
+       set_64bit_val(wqe, 16, hmc_fn_id);
+       set_64bit_val(wqe, 32, commit_fpm_mem->pa);
+
+       header = LS_64(I40IW_CQP_OP_COMMIT_FPM_VALUES, I40IW_CQPSQ_OPCODE) |
+                LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+       i40iw_insert_wqe_hdr(wqe, header);
+
+       i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "COMMIT_FPM_VALUES WQE",
+                       wqe, I40IW_CQP_WQE_SIZE * 8);
+
+       i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
+       if (error)
+               return I40IW_ERR_CQP_COMPL_ERROR;
+
+       if (post_sq) {
+               i40iw_sc_cqp_post_sq(cqp);
+
+               if (wait_type == I40IW_CQP_WAIT_POLL_REGS)
+                       ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
+               else if (wait_type == I40IW_CQP_WAIT_POLL_CQ)
+                       ret_code = i40iw_sc_commit_fpm_values_done(cqp);
+       }
+
+       return ret_code;
+}
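
i40iw_sc_commit_fpm_values() above records the CQP tail/error registers before posting, then waits for completion in one of two ways selected by wait_type: spinning on the registers up to I40IW_DONE_COUNT iterations (I40IW_CQP_WAIT_POLL_REGS) or polling the control CQ for the matching opcode (I40IW_CQP_WAIT_POLL_CQ). A small stand-alone sketch of the register-polling flavor, with a plain counter standing in for the hardware tail register and a fixed retry budget standing in for I40IW_DONE_COUNT; the "tail moved means the WQE was consumed" condition is a simplification of what the driver actually checks:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the hardware tail register; a real driver would read MMIO. */
static volatile uint32_t hw_tail;

static int poll_registers(uint32_t tail_at_post, unsigned int count)
{
    while (count--) {
        uint32_t new_tail = hw_tail;        /* would be a register read */

        if (new_tail != tail_at_post)
            return 0;                       /* hardware consumed the WQE */
    }
    return -1;                              /* timed out */
}

int main(void)
{
    uint32_t tail_at_post = hw_tail;

    hw_tail++;                              /* pretend hardware made progress */
    printf("%s\n", poll_registers(tail_at_post, 1000) ? "timeout" : "done");
    return 0;
}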
+
+/**
+ * i40iw_sc_query_fpm_values_done - poll for cqp wqe completion for query fpm
+ * @cqp: struct for cqp hw
+ */
+static enum i40iw_status_code i40iw_sc_query_fpm_values_done(struct i40iw_sc_cqp *cqp)
+{
+       return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_QUERY_FPM_VALUES, NULL);
+}
+
+/**
+ * i40iw_sc_query_fpm_values - cqp wqe query fpm values
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @hmc_fn_id: hmc function id
+ * @query_fpm_mem: memory for return fpm values
+ * @post_sq: flag for cqp db to ring
+ * @wait_type: poll ccq or cqp registers for cqp completion
+ */
+static enum i40iw_status_code i40iw_sc_query_fpm_values(
+                                       struct i40iw_sc_cqp *cqp,
+                                       u64 scratch,
+                                       u8 hmc_fn_id,
+                                       struct i40iw_dma_mem *query_fpm_mem,
+                                       bool post_sq,
+                                       u8 wait_type)
+{
+       u64 *wqe;
+       u64 header;
+       u32 tail, val, error;
+       enum i40iw_status_code ret_code = 0;
+
+       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       if (!wqe)
+               return I40IW_ERR_RING_FULL;
+
+       set_64bit_val(wqe, 16, hmc_fn_id);
+       set_64bit_val(wqe, 32, query_fpm_mem->pa);
+
+       header = LS_64(I40IW_CQP_OP_QUERY_FPM_VALUES, I40IW_CQPSQ_OPCODE) |
+                LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+       i40iw_insert_wqe_hdr(wqe, header);
+
+       i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QUERY_FPM WQE",
+                       wqe, I40IW_CQP_WQE_SIZE * 8);
+
+       /* read the tail from CQP_TAIL register */
+       i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
+
+       if (error)
+               return I40IW_ERR_CQP_COMPL_ERROR;
+
+       if (post_sq) {
+               i40iw_sc_cqp_post_sq(cqp);
+               if (wait_type == I40IW_CQP_WAIT_POLL_REGS)
+                       ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
+               else if (wait_type == I40IW_CQP_WAIT_POLL_CQ)
+                       ret_code = i40iw_sc_query_fpm_values_done(cqp);
+       }
+
+       return ret_code;
+}
+
+/**
+ * i40iw_sc_add_arp_cache_entry - cqp wqe add arp cache entry
+ * @cqp: struct for cqp hw
+ * @info: arp entry information
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_add_arp_cache_entry(
+                               struct i40iw_sc_cqp *cqp,
+                               struct i40iw_add_arp_cache_entry_info *info,
+                               u64 scratch,
+                               bool post_sq)
+{
+       u64 *wqe;
+       u64 temp, header;
+
+       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       if (!wqe)
+               return I40IW_ERR_RING_FULL;
+       set_64bit_val(wqe, 8, info->reach_max);
+
+       temp = info->mac_addr[5] |
+              LS_64_1(info->mac_addr[4], 8) |
+              LS_64_1(info->mac_addr[3], 16) |
+              LS_64_1(info->mac_addr[2], 24) |
+              LS_64_1(info->mac_addr[1], 32) |
+              LS_64_1(info->mac_addr[0], 40);
+
+       set_64bit_val(wqe, 16, temp);
+
+       header = info->arp_index |
+                LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) |
+                LS_64((info->permanent ? 1 : 0), I40IW_CQPSQ_MAT_PERMANENT) |
+                LS_64(1, I40IW_CQPSQ_MAT_ENTRYVALID) |
+                LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+       i40iw_insert_wqe_hdr(wqe, header);
+
+       i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ARP_CACHE_ENTRY WQE",
+                       wqe, I40IW_CQP_WQE_SIZE * 8);
+
+       if (post_sq)
+               i40iw_sc_cqp_post_sq(cqp);
+       return 0;
+}
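
The LS_64_1() chain above folds the six MAC octets into a single 64-bit value with mac_addr[0] in the most significant used byte, which is the layout the ARP cache WQE expects. The same packing as a tiny stand-alone helper:

#include <stdint.h>
#include <stdio.h>

static uint64_t pack_mac(const uint8_t mac[6])
{
    return (uint64_t)mac[5]        |
           ((uint64_t)mac[4] << 8)  |
           ((uint64_t)mac[3] << 16) |
           ((uint64_t)mac[2] << 24) |
           ((uint64_t)mac[1] << 32) |
           ((uint64_t)mac[0] << 40);
}

int main(void)
{
    const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

    /* prints 001122334455: byte 0 lands in the highest used byte */
    printf("%012llx\n", (unsigned long long)pack_mac(mac));
    return 0;
}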
+
+/**
+ * i40iw_sc_del_arp_cache_entry - delete arp cache entry
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @arp_index: arp index to delete arp entry
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_del_arp_cache_entry(
+                                       struct i40iw_sc_cqp *cqp,
+                                       u64 scratch,
+                                       u16 arp_index,
+                                       bool post_sq)
+{
+       u64 *wqe;
+       u64 header;
+
+       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       if (!wqe)
+               return I40IW_ERR_RING_FULL;
+
+       header = arp_index |
+                LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) |
+                LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+       i40iw_insert_wqe_hdr(wqe, header);
+
+       i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ARP_CACHE_DEL_ENTRY WQE",
+                       wqe, I40IW_CQP_WQE_SIZE * 8);
+
+       if (post_sq)
+               i40iw_sc_cqp_post_sq(cqp);
+       return 0;
+}
+
+/**
+ * i40iw_sc_query_arp_cache_entry - cqp wqe to query arp cache entry at arp index
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @arp_index: arp index of the entry to query
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_query_arp_cache_entry(
+                               struct i40iw_sc_cqp *cqp,
+                               u64 scratch,
+                               u16 arp_index,
+                               bool post_sq)
+{
+       u64 *wqe;
+       u64 header;
+
+       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       if (!wqe)
+               return I40IW_ERR_RING_FULL;
+
+       header = arp_index |
+                LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) |
+                LS_64(1, I40IW_CQPSQ_MAT_QUERY) |
+                LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+       i40iw_insert_wqe_hdr(wqe, header);
+
+       i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QUERY_ARP_CACHE_ENTRY WQE",
+                       wqe, I40IW_CQP_WQE_SIZE * 8);
+
+       if (post_sq)
+               i40iw_sc_cqp_post_sq(cqp);
+       return 0;
+}
+
+/**
+ * i40iw_sc_manage_apbvt_entry - for adding and deleting apbvt entries
+ * @cqp: struct for cqp hw
+ * @info: info for apbvt entry to add or delete
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_manage_apbvt_entry(
+                               struct i40iw_sc_cqp *cqp,
+                               struct i40iw_apbvt_info *info,
+                               u64 scratch,
+                               bool post_sq)
+{
+       u64 *wqe;
+       u64 header;
+
+       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       if (!wqe)
+               return I40IW_ERR_RING_FULL;
+
+       set_64bit_val(wqe, 16, info->port);
+
+       header = LS_64(I40IW_CQP_OP_MANAGE_APBVT, I40IW_CQPSQ_OPCODE) |
+                LS_64(info->add, I40IW_CQPSQ_MAPT_ADDPORT) |
+                LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+       i40iw_insert_wqe_hdr(wqe, header);
+
+       i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_APBVT WQE",
+                       wqe, I40IW_CQP_WQE_SIZE * 8);
+
+       if (post_sq)
+               i40iw_sc_cqp_post_sq(cqp);
+       return 0;
+}
+
+/**
+ * i40iw_sc_manage_qhash_table_entry - manage quad hash entries
+ * @cqp: struct for cqp hw
+ * @info: info for quad hash to manage
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ *
+ * This is called before connection establishment is started. For passive connections,
+ * when a listener is created, it is called with an entry type of I40IW_QHASH_TYPE_TCP_SYN
+ * and the local ip address and tcp port. When a SYN is received (passive connections) or
+ * sent (active connections), this routine is called with an entry type of
+ * I40IW_QHASH_TYPE_TCP_ESTABLISHED and the quad is passed in info.
+ *
+ * Once the iwarp connection is established and its state moves to RTS, the quad hash entry
+ * in the hardware points to the iwarp qp number and requires no further calls from the driver.
+ */
+static enum i40iw_status_code i40iw_sc_manage_qhash_table_entry(
+                                       struct i40iw_sc_cqp *cqp,
+                                       struct i40iw_qhash_table_info *info,
+                                       u64 scratch,
+                                       bool post_sq)
+{
+       u64 *wqe;
+       u64 qw1 = 0;
+       u64 qw2 = 0;
+       u64 temp;
+
+       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       if (!wqe)
+               return I40IW_ERR_RING_FULL;
+
+       temp = info->mac_addr[5] |
+               LS_64_1(info->mac_addr[4], 8) |
+               LS_64_1(info->mac_addr[3], 16) |
+               LS_64_1(info->mac_addr[2], 24) |
+               LS_64_1(info->mac_addr[1], 32) |
+               LS_64_1(info->mac_addr[0], 40);
+
+       set_64bit_val(wqe, 0, temp);
+
+       qw1 = LS_64(info->qp_num, I40IW_CQPSQ_QHASH_QPN) |
+             LS_64(info->dest_port, I40IW_CQPSQ_QHASH_DEST_PORT);
+       if (info->ipv4_valid) {
+               set_64bit_val(wqe,
+                             48,
+                             LS_64(info->dest_ip[0], I40IW_CQPSQ_QHASH_ADDR3));
+       } else {
+               set_64bit_val(wqe,
+                             56,
+                             LS_64(info->dest_ip[0], I40IW_CQPSQ_QHASH_ADDR0) |
+                             LS_64(info->dest_ip[1], I40IW_CQPSQ_QHASH_ADDR1));
+
+               set_64bit_val(wqe,
+                             48,
+                             LS_64(info->dest_ip[2], I40IW_CQPSQ_QHASH_ADDR2) |
+                             LS_64(info->dest_ip[3], I40IW_CQPSQ_QHASH_ADDR3));
+       }
+       qw2 = LS_64(cqp->dev->qs_handle, I40IW_CQPSQ_QHASH_QS_HANDLE);
+       if (info->vlan_valid)
+               qw2 |= LS_64(info->vlan_id, I40IW_CQPSQ_QHASH_VLANID);
+       set_64bit_val(wqe, 16, qw2);
+       if (info->entry_type == I40IW_QHASH_TYPE_TCP_ESTABLISHED) {
+               qw1 |= LS_64(info->src_port, I40IW_CQPSQ_QHASH_SRC_PORT);
+               if (!info->ipv4_valid) {
+                       set_64bit_val(wqe,
+                                     40,
+                                     LS_64(info->src_ip[0], I40IW_CQPSQ_QHASH_ADDR0) |
+                                     LS_64(info->src_ip[1], I40IW_CQPSQ_QHASH_ADDR1));
+                       set_64bit_val(wqe,
+                                     32,
+                                     LS_64(info->src_ip[2], I40IW_CQPSQ_QHASH_ADDR2) |
+                                     LS_64(info->src_ip[3], I40IW_CQPSQ_QHASH_ADDR3));
+               } else {
+                       set_64bit_val(wqe,
+                                     32,
+                                     LS_64(info->src_ip[0], I40IW_CQPSQ_QHASH_ADDR3));
+               }
+       }
+
+       set_64bit_val(wqe, 8, qw1);
+       temp = LS_64(cqp->polarity, I40IW_CQPSQ_QHASH_WQEVALID) |
+              LS_64(I40IW_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY, I40IW_CQPSQ_QHASH_OPCODE) |
+              LS_64(info->manage, I40IW_CQPSQ_QHASH_MANAGE) |
+              LS_64(info->ipv4_valid, I40IW_CQPSQ_QHASH_IPV4VALID) |
+              LS_64(info->vlan_valid, I40IW_CQPSQ_QHASH_VLANVALID) |
+              LS_64(info->entry_type, I40IW_CQPSQ_QHASH_ENTRYTYPE);
+
+       i40iw_insert_wqe_hdr(wqe, temp);
+
+       i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_QHASH WQE",
+                       wqe, I40IW_CQP_WQE_SIZE * 8);
+
+       if (post_sq)
+               i40iw_sc_cqp_post_sq(cqp);
+       return 0;
+}
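
Per the comment on i40iw_sc_manage_qhash_table_entry(), a passive connection gets two entries over its lifetime: a SYN-type entry keyed only on the local address and port when the listener is created, and an ESTABLISHED-type entry carrying the full four-tuple once a SYN is seen. A simplified stand-alone sketch of that two-phase fill pattern; the struct below is a reduced stand-in for i40iw_qhash_table_info, not the real definition:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum qhash_type { QHASH_TCP_SYN, QHASH_TCP_ESTABLISHED };

/* Reduced stand-in for the driver's qhash info struct. */
struct qhash_info {
    enum qhash_type entry_type;
    bool manage;                 /* true = add, false = remove (illustrative) */
    uint32_t dest_ip;            /* local address */
    uint16_t dest_port;          /* local port */
    uint32_t src_ip;             /* remote address, ESTABLISHED only */
    uint16_t src_port;           /* remote port, ESTABLISHED only */
};

/* Phase 1: listener created - hash on the local address and port only. */
static struct qhash_info listen_entry(uint32_t laddr, uint16_t lport)
{
    return (struct qhash_info){
        .entry_type = QHASH_TCP_SYN, .manage = true,
        .dest_ip = laddr, .dest_port = lport,
    };
}

/* Phase 2: SYN seen - hash on the full four-tuple. */
static struct qhash_info established_entry(uint32_t laddr, uint16_t lport,
                                           uint32_t raddr, uint16_t rport)
{
    return (struct qhash_info){
        .entry_type = QHASH_TCP_ESTABLISHED, .manage = true,
        .dest_ip = laddr, .dest_port = lport,
        .src_ip = raddr, .src_port = rport,
    };
}

int main(void)
{
    struct qhash_info listen = listen_entry(0x0a000001, 4791);
    struct qhash_info est = established_entry(0x0a000001, 4791, 0x0a000002, 50123);

    printf("listen type=%d est type=%d\n", listen.entry_type, est.entry_type);
    return 0;
}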
+
+/**
+ * i40iw_sc_alloc_local_mac_ipaddr_entry - cqp wqe for loc mac entry
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_alloc_local_mac_ipaddr_entry(
+                                       struct i40iw_sc_cqp *cqp,
+                                       u64 scratch,
+                                       bool post_sq)
+{
+       u64 *wqe;
+       u64 header;
+
+       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       if (!wqe)
+               return I40IW_ERR_RING_FULL;
+       header = LS_64(I40IW_CQP_OP_ALLOCATE_LOC_MAC_IP_TABLE_ENTRY, I40IW_CQPSQ_OPCODE) |
+                LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+       i40iw_insert_wqe_hdr(wqe, header);
+       i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ALLOCATE_LOCAL_MAC_IPADDR WQE",
+                       wqe, I40IW_CQP_WQE_SIZE * 8);
+       if (post_sq)
+               i40iw_sc_cqp_post_sq(cqp);
+       return 0;
+}
+
+/**
+ * i40iw_sc_add_local_mac_ipaddr_entry - add mac entry
+ * @cqp: struct for cqp hw
+ * @info: mac addr info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_add_local_mac_ipaddr_entry(
+                               struct i40iw_sc_cqp *cqp,
+                               struct i40iw_local_mac_ipaddr_entry_info *info,
+                               u64 scratch,
+                               bool post_sq)
+{
+       u64 *wqe;
+       u64 temp, header;
+
+       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       if (!wqe)
+               return I40IW_ERR_RING_FULL;
+       temp = info->mac_addr[5] |
+               LS_64_1(info->mac_addr[4], 8) |
+               LS_64_1(info->mac_addr[3], 16) |
+               LS_64_1(info->mac_addr[2], 24) |
+               LS_64_1(info->mac_addr[1], 32) |
+               LS_64_1(info->mac_addr[0], 40);
+
+       set_64bit_val(wqe, 32, temp);
+
+       header = LS_64(info->entry_idx, I40IW_CQPSQ_MLIPA_IPTABLEIDX) |
+                LS_64(I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE, I40IW_CQPSQ_OPCODE) |
+                LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+       i40iw_insert_wqe_hdr(wqe, header);
+
+       i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ADD_LOCAL_MAC_IPADDR WQE",
+                       wqe, I40IW_CQP_WQE_SIZE * 8);
+
+       if (post_sq)
+               i40iw_sc_cqp_post_sq(cqp);
+       return 0;
+}
+
+/**
+ * i40iw_sc_del_local_mac_ipaddr_entry - cqp wqe to delete local mac entry
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @entry_idx: index of mac entry
+ * @ignore_ref_count: to force mac address entry delete
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_del_local_mac_ipaddr_entry(
+                               struct i40iw_sc_cqp *cqp,
+                               u64 scratch,
+                               u8 entry_idx,
+                               u8 ignore_ref_count,
+                               bool post_sq)
+{
+       u64 *wqe;
+       u64 header;
+
+       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       if (!wqe)
+               return I40IW_ERR_RING_FULL;
+       header = LS_64(entry_idx, I40IW_CQPSQ_MLIPA_IPTABLEIDX) |
+                LS_64(I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE, I40IW_CQPSQ_OPCODE) |
+                LS_64(1, I40IW_CQPSQ_MLIPA_FREEENTRY) |
+                LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
+                LS_64(ignore_ref_count, I40IW_CQPSQ_MLIPA_IGNORE_REF_CNT);
+
+       i40iw_insert_wqe_hdr(wqe, header);
+
+       i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "DEL_LOCAL_MAC_IPADDR WQE",
+                       wqe, I40IW_CQP_WQE_SIZE * 8);
+
+       if (post_sq)
+               i40iw_sc_cqp_post_sq(cqp);
+       return 0;
+}
+
+/**
+ * i40iw_sc_cqp_nop - send a nop wqe
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_cqp_nop(struct i40iw_sc_cqp *cqp,
+                                              u64 scratch,
+                                              bool post_sq)
+{
+       u64 *wqe;
+       u64 header;
+
+       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       if (!wqe)
+               return I40IW_ERR_RING_FULL;
+       header = LS_64(I40IW_CQP_OP_NOP, I40IW_CQPSQ_OPCODE) |
+                LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+       i40iw_insert_wqe_hdr(wqe, header);
+       i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "NOP WQE",
+                       wqe, I40IW_CQP_WQE_SIZE * 8);
+
+       if (post_sq)
+               i40iw_sc_cqp_post_sq(cqp);
+       return 0;
+}
+
+/**
+ * i40iw_sc_ceq_init - initialize ceq
+ * @ceq: ceq sc structure
+ * @info: ceq initialization info
+ */
+static enum i40iw_status_code i40iw_sc_ceq_init(struct i40iw_sc_ceq *ceq,
+                                               struct i40iw_ceq_init_info *info)
+{
+       u32 pble_obj_cnt;
+
+       if ((info->elem_cnt < I40IW_MIN_CEQ_ENTRIES) ||
+           (info->elem_cnt > I40IW_MAX_CEQ_ENTRIES))
+               return I40IW_ERR_INVALID_SIZE;
+
+       if (info->ceq_id >= I40IW_MAX_CEQID)
+               return I40IW_ERR_INVALID_CEQ_ID;
+
+       pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
+
+       if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
+               return I40IW_ERR_INVALID_PBLE_INDEX;
+
+       ceq->size = sizeof(*ceq);
+       ceq->ceqe_base = (struct i40iw_ceqe *)info->ceqe_base;
+       ceq->ceq_id = info->ceq_id;
+       ceq->dev = info->dev;
+       ceq->elem_cnt = info->elem_cnt;
+       ceq->ceq_elem_pa = info->ceqe_pa;
+       ceq->virtual_map = info->virtual_map;
+
+       ceq->pbl_chunk_size = (ceq->virtual_map ? info->pbl_chunk_size : 0);
+       ceq->first_pm_pbl_idx = (ceq->virtual_map ? info->first_pm_pbl_idx : 0);
+       ceq->pbl_list = (ceq->virtual_map ? info->pbl_list : NULL);
+
+       ceq->tph_en = info->tph_en;
+       ceq->tph_val = info->tph_val;
+       ceq->polarity = 1;
+       I40IW_RING_INIT(ceq->ceq_ring, ceq->elem_cnt);
+       ceq->dev->ceq[info->ceq_id] = ceq;
+
+       return 0;
+}
+
+/**
+ * i40iw_sc_ceq_create - create ceq wqe
+ * @ceq: ceq sc structure
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_ceq_create(struct i40iw_sc_ceq *ceq,
+                                                 u64 scratch,
+                                                 bool post_sq)
+{
+       struct i40iw_sc_cqp *cqp;
+       u64 *wqe;
+       u64 header;
+
+       cqp = ceq->dev->cqp;
+       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       if (!wqe)
+               return I40IW_ERR_RING_FULL;
+       set_64bit_val(wqe, 16, ceq->elem_cnt);
+       set_64bit_val(wqe, 32, (ceq->virtual_map ? 0 : ceq->ceq_elem_pa));
+       set_64bit_val(wqe, 48, (ceq->virtual_map ? ceq->first_pm_pbl_idx : 0));
+       set_64bit_val(wqe, 56, LS_64(ceq->tph_val, I40IW_CQPSQ_TPHVAL));
+
+       header = ceq->ceq_id |
+                LS_64(I40IW_CQP_OP_CREATE_CEQ, I40IW_CQPSQ_OPCODE) |
+                LS_64(ceq->pbl_chunk_size, I40IW_CQPSQ_CEQ_LPBLSIZE) |
+                LS_64(ceq->virtual_map, I40IW_CQPSQ_CEQ_VMAP) |
+                LS_64(ceq->tph_en, I40IW_CQPSQ_TPHEN) |
+                LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+       i40iw_insert_wqe_hdr(wqe, header);
+
+       i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CEQ_CREATE WQE",
+                       wqe, I40IW_CQP_WQE_SIZE * 8);
+
+       if (post_sq)
+               i40iw_sc_cqp_post_sq(cqp);
+       return 0;
+}
+
+/**
+ * i40iw_sc_cceq_create_done - poll for control ceq wqe to complete
+ * @ceq: ceq sc structure
+ */
+static enum i40iw_status_code i40iw_sc_cceq_create_done(struct i40iw_sc_ceq *ceq)
+{
+       struct i40iw_sc_cqp *cqp;
+
+       cqp = ceq->dev->cqp;
+       return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_CEQ, NULL);
+}
+
+/**
+ * i40iw_sc_cceq_destroy_done - poll for destroy cceq to complete
+ * @ceq: ceq sc structure
+ */
+static enum i40iw_status_code i40iw_sc_cceq_destroy_done(struct i40iw_sc_ceq *ceq)
+{
+       struct i40iw_sc_cqp *cqp;
+
+       cqp = ceq->dev->cqp;
+       cqp->process_cqp_sds = i40iw_update_sds_noccq;
+       return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_DESTROY_CEQ, NULL);
+}
+
+/**
+ * i40iw_sc_cceq_create - create cceq
+ * @ceq: ceq sc structure
+ * @scratch: u64 saved to be used during cqp completion
+ */
+static enum i40iw_status_code i40iw_sc_cceq_create(struct i40iw_sc_ceq *ceq, u64 scratch)
+{
+       enum i40iw_status_code ret_code;
+
+       ret_code = i40iw_sc_ceq_create(ceq, scratch, true);
+       if (!ret_code)
+               ret_code = i40iw_sc_cceq_create_done(ceq);
+       return ret_code;
+}
+
+/**
+ * i40iw_sc_ceq_destroy - destroy ceq
+ * @ceq: ceq sc structure
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_ceq_destroy(struct i40iw_sc_ceq *ceq,
+                                                  u64 scratch,
+                                                  bool post_sq)
+{
+       struct i40iw_sc_cqp *cqp;
+       u64 *wqe;
+       u64 header;
+
+       cqp = ceq->dev->cqp;
+       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       if (!wqe)
+               return I40IW_ERR_RING_FULL;
+       set_64bit_val(wqe, 16, ceq->elem_cnt);
+       set_64bit_val(wqe, 48, ceq->first_pm_pbl_idx);
+       header = ceq->ceq_id |
+                LS_64(I40IW_CQP_OP_DESTROY_CEQ, I40IW_CQPSQ_OPCODE) |
+                LS_64(ceq->pbl_chunk_size, I40IW_CQPSQ_CEQ_LPBLSIZE) |
+                LS_64(ceq->virtual_map, I40IW_CQPSQ_CEQ_VMAP) |
+                LS_64(ceq->tph_en, I40IW_CQPSQ_TPHEN) |
+                LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+       i40iw_insert_wqe_hdr(wqe, header);
+       i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CEQ_DESTROY WQE",
+                       wqe, I40IW_CQP_WQE_SIZE * 8);
+
+       if (post_sq)
+               i40iw_sc_cqp_post_sq(cqp);
+       return 0;
+}
+
+/**
+ * i40iw_sc_process_ceq - process ceq
+ * @dev: sc device struct
+ * @ceq: ceq sc structure
+ */
+static void *i40iw_sc_process_ceq(struct i40iw_sc_dev *dev, struct i40iw_sc_ceq *ceq)
+{
+       u64 temp;
+       u64 *ceqe;
+       struct i40iw_sc_cq *cq = NULL;
+       u8 polarity;
+
+       ceqe = (u64 *)I40IW_GET_CURRENT_CEQ_ELEMENT(ceq);
+       get_64bit_val(ceqe, 0, &temp);
+       polarity = (u8)RS_64(temp, I40IW_CEQE_VALID);
+       if (polarity != ceq->polarity)
+               return cq;
+
+       cq = (struct i40iw_sc_cq *)(unsigned long)LS_64_1(temp, 1);
+
+       I40IW_RING_MOVE_TAIL(ceq->ceq_ring);
+       if (I40IW_RING_GETCURRENT_TAIL(ceq->ceq_ring) == 0)
+               ceq->polarity ^= 1;
+
+       if (dev->is_pf)
+               i40iw_wr32(dev->hw, I40E_PFPE_CQACK, cq->cq_uk.cq_id);
+       else
+               i40iw_wr32(dev->hw, I40E_VFPE_CQACK1, cq->cq_uk.cq_id);
+
+       return cq;
+}
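
i40iw_sc_process_ceq() above treats an entry as new only while its valid bit matches ceq->polarity, and flips the expected polarity each time the tail wraps back to index 0, so stale entries from the previous lap are never re-consumed. A stand-alone sketch of that polarity scheme with made-up sizes and bit positions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 4
#define VALID_BIT (1ULL << 63)

struct ring {
    uint64_t elem[RING_SIZE];
    unsigned int tail;
    unsigned int polarity;   /* starts at 1, like ceq->polarity */
};

static bool ring_pop(struct ring *r, uint64_t *out)
{
    uint64_t e = r->elem[r->tail];
    unsigned int valid = (e & VALID_BIT) ? 1 : 0;

    if (valid != r->polarity)
        return false;                   /* nothing new to consume */

    *out = e & ~VALID_BIT;
    r->tail = (r->tail + 1) % RING_SIZE;
    if (r->tail == 0)
        r->polarity ^= 1;               /* wrapped: flip expected polarity */
    return true;
}

int main(void)
{
    struct ring r = { .polarity = 1 };
    uint64_t v;

    r.elem[0] = VALID_BIT | 7;          /* producer wrote one entry */
    if (ring_pop(&r, &v))
        printf("consumed %llu\n", (unsigned long long)v);
    if (!ring_pop(&r, &v))
        printf("ring empty\n");
    return 0;
}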
+
+/**
+ * i40iw_sc_aeq_init - initialize aeq
+ * @aeq: aeq structure ptr
+ * @info: aeq initialization info
+ */
+static enum i40iw_status_code i40iw_sc_aeq_init(struct i40iw_sc_aeq *aeq,
+                                               struct i40iw_aeq_init_info *info)
+{
+       u32 pble_obj_cnt;
+
+       if ((info->elem_cnt < I40IW_MIN_AEQ_ENTRIES) ||
+           (info->elem_cnt > I40IW_MAX_AEQ_ENTRIES))
+               return I40IW_ERR_INVALID_SIZE;
+       pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
+
+       if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
+               return I40IW_ERR_INVALID_PBLE_INDEX;
+
+       aeq->size = sizeof(*aeq);
+       aeq->polarity = 1;
+       aeq->aeqe_base = (struct i40iw_sc_aeqe *)info->aeqe_base;
+       aeq->dev = info->dev;
+       aeq->elem_cnt = info->elem_cnt;
+
+       aeq->aeq_elem_pa = info->aeq_elem_pa;
+       I40IW_RING_INIT(aeq->aeq_ring, aeq->elem_cnt);
+       info->dev->aeq = aeq;
+
+       aeq->virtual_map = info->virtual_map;
+       aeq->pbl_list = (aeq->virtual_map ? info->pbl_list : NULL);
+       aeq->pbl_chunk_size = (aeq->virtual_map ? info->pbl_chunk_size : 0);
+       aeq->first_pm_pbl_idx = (aeq->virtual_map ? info->first_pm_pbl_idx : 0);
+       return 0;
+}
+
+/**
+ * i40iw_sc_aeq_create - create aeq
+ * @aeq: aeq structure ptr
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_aeq_create(struct i40iw_sc_aeq *aeq,
+                                                 u64 scratch,
+                                                 bool post_sq)
+{
+       u64 *wqe;
+       struct i40iw_sc_cqp *cqp;
+       u64 header;
+
+       cqp = aeq->dev->cqp;
+       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       if (!wqe)
+               return I40IW_ERR_RING_FULL;
+       set_64bit_val(wqe, 16, aeq->elem_cnt);
+       set_64bit_val(wqe, 32,
+                     (aeq->virtual_map ? 0 : aeq->aeq_elem_pa));
+       set_64bit_val(wqe, 48,
+                     (aeq->virtual_map ? aeq->first_pm_pbl_idx : 0));
+
+       header = LS_64(I40IW_CQP_OP_CREATE_AEQ, I40IW_CQPSQ_OPCODE) |
+                LS_64(aeq->pbl_chunk_size, I40IW_CQPSQ_AEQ_LPBLSIZE) |
+                LS_64(aeq->virtual_map, I40IW_CQPSQ_AEQ_VMAP) |
+                LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+       i40iw_insert_wqe_hdr(wqe, header);
+       i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "AEQ_CREATE WQE",
+                       wqe, I40IW_CQP_WQE_SIZE * 8);
+       if (post_sq)
+               i40iw_sc_cqp_post_sq(cqp);
+       return 0;
+}
+
+/**
+ * i40iw_sc_aeq_destroy - destroy aeq during close
+ * @aeq: aeq structure ptr
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_aeq_destroy(struct i40iw_sc_aeq *aeq,
+                                                  u64 scratch,
+                                                  bool post_sq)
+{
+       u64 *wqe;
+       struct i40iw_sc_cqp *cqp;
+       u64 header;
+
+       cqp = aeq->dev->cqp;
+       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       if (!wqe)
+               return I40IW_ERR_RING_FULL;
+       set_64bit_val(wqe, 16, aeq->elem_cnt);
+       set_64bit_val(wqe, 48, aeq->first_pm_pbl_idx);
+       header = LS_64(I40IW_CQP_OP_DESTROY_AEQ, I40IW_CQPSQ_OPCODE) |
+                LS_64(aeq->pbl_chunk_size, I40IW_CQPSQ_AEQ_LPBLSIZE) |
+                LS_64(aeq->virtual_map, I40IW_CQPSQ_AEQ_VMAP) |
+                LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+       i40iw_insert_wqe_hdr(wqe, header);
+
+       i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "AEQ_DESTROY WQE",
+                       wqe, I40IW_CQP_WQE_SIZE * 8);
+       if (post_sq)
+               i40iw_sc_cqp_post_sq(cqp);
+       return 0;
+}
+
+/**
+ * i40iw_sc_get_next_aeqe - get next aeq entry
+ * @aeq: aeq structure ptr
+ * @info: aeqe info to be returned
+ */
+static enum i40iw_status_code i40iw_sc_get_next_aeqe(struct i40iw_sc_aeq *aeq,
+                                                    struct i40iw_aeqe_info *info)
+{
+       u64 temp, compl_ctx;
+       u64 *aeqe;
+       u16 wqe_idx;
+       u8 ae_src;
+       u8 polarity;
+
+       aeqe = (u64 *)I40IW_GET_CURRENT_AEQ_ELEMENT(aeq);
+       get_64bit_val(aeqe, 0, &compl_ctx);
+       get_64bit_val(aeqe, 8, &temp);
+       polarity = (u8)RS_64(temp, I40IW_AEQE_VALID);
+
+       if (aeq->polarity != polarity)
+               return I40IW_ERR_QUEUE_EMPTY;
+
+       i40iw_debug_buf(aeq->dev, I40IW_DEBUG_WQE, "AEQ_ENTRY", aeqe, 16);
+
+       ae_src = (u8)RS_64(temp, I40IW_AEQE_AESRC);
+       wqe_idx = (u16)RS_64(temp, I40IW_AEQE_WQDESCIDX);
+       info->qp_cq_id = (u32)RS_64(temp, I40IW_AEQE_QPCQID);
+       info->ae_id = (u16)RS_64(temp, I40IW_AEQE_AECODE);
+       info->tcp_state = (u8)RS_64(temp, I40IW_AEQE_TCPSTATE);
+       info->iwarp_state = (u8)RS_64(temp, I40IW_AEQE_IWSTATE);
+       info->q2_data_written = (u8)RS_64(temp, I40IW_AEQE_Q2DATA);
+       info->aeqe_overflow = (bool)RS_64(temp, I40IW_AEQE_OVERFLOW);
+       switch (ae_src) {
+       case I40IW_AE_SOURCE_RQ:
+       case I40IW_AE_SOURCE_RQ_0011:
+               info->qp = true;
+               info->wqe_idx = wqe_idx;
+               info->compl_ctx = compl_ctx;
+               break;
+       case I40IW_AE_SOURCE_CQ:
+       case I40IW_AE_SOURCE_CQ_0110:
+       case I40IW_AE_SOURCE_CQ_1010:
+       case I40IW_AE_SOURCE_CQ_1110:
+               info->cq = true;
+               info->compl_ctx = LS_64_1(compl_ctx, 1);
+               break;
+       case I40IW_AE_SOURCE_SQ:
+       case I40IW_AE_SOURCE_SQ_0111:
+               info->qp = true;
+               info->sq = true;
+               info->wqe_idx = wqe_idx;
+               info->compl_ctx = compl_ctx;
+               break;
+       case I40IW_AE_SOURCE_IN_RR_WR:
+       case I40IW_AE_SOURCE_IN_RR_WR_1011:
+               info->qp = true;
+               info->compl_ctx = compl_ctx;
+               info->in_rdrsp_wr = true;
+               break;
+       case I40IW_AE_SOURCE_OUT_RR:
+       case I40IW_AE_SOURCE_OUT_RR_1111:
+               info->qp = true;
+               info->compl_ctx = compl_ctx;
+               info->out_rdrsp = true;
+               break;
+       default:
+               break;
+       }
+       I40IW_RING_MOVE_TAIL(aeq->aeq_ring);
+       if (I40IW_RING_GETCURRENT_TAIL(aeq->aeq_ring) == 0)
+               aeq->polarity ^= 1;
+       return 0;
+}
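
i40iw_sc_get_next_aeqe() decodes one asynchronous event by RS_64()-extracting the source, WQE index, queue id and state fields out of the second 64-bit word, then fans out on the source to decide whether compl_ctx refers to a QP or a CQ. A small stand-alone sketch of that extract-then-dispatch shape; the bit positions and source codes here are illustrative only:

#include <stdint.h>
#include <stdio.h>

/* Illustrative field layout only - not the real AEQE format. */
#define AE_SRC_SHIFT   56
#define AE_SRC_MASK    (0xfULL << AE_SRC_SHIFT)
#define QPCQID_SHIFT   0
#define QPCQID_MASK    (0x3ffffULL << QPCQID_SHIFT)

/* Mirrors RS_64(val, FIELD): mask the field out, then shift it down. */
#define RS(val, name)  (((val) & name##_MASK) >> name##_SHIFT)

enum { AE_SRC_RQ = 1, AE_SRC_CQ = 2, AE_SRC_SQ = 3 };

int main(void)
{
    uint64_t aeqe = ((uint64_t)AE_SRC_CQ << AE_SRC_SHIFT) | 42; /* fake entry */
    unsigned int src = (unsigned int)RS(aeqe, AE_SRC);
    unsigned int id  = (unsigned int)RS(aeqe, QPCQID);

    switch (src) {
    case AE_SRC_RQ:
    case AE_SRC_SQ:
        printf("QP %u event\n", id);    /* compl_ctx would point at the QP */
        break;
    case AE_SRC_CQ:
        printf("CQ %u event\n", id);    /* compl_ctx would point at the CQ */
        break;
    default:
        printf("unhandled source %u\n", src);
        break;
    }
    return 0;
}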
+
+/**
+ * i40iw_sc_repost_aeq_entries - repost completed aeq entries
+ * @dev: sc device struct
+ * @count: allocate count
+ */
+static enum i40iw_status_code i40iw_sc_repost_aeq_entries(struct i40iw_sc_dev *dev,
+                                                         u32 count)
+{
+       if (count > I40IW_MAX_AEQ_ALLOCATE_COUNT)
+               return I40IW_ERR_INVALID_SIZE;
+
+       if (dev->is_pf)
+               i40iw_wr32(dev->hw, I40E_PFPE_AEQALLOC, count);
+       else
+               i40iw_wr32(dev->hw, I40E_VFPE_AEQALLOC1, count);
+
+       return 0;
+}
+
+/**
+ * i40iw_sc_aeq_create_done - poll for aeq create cqp op to complete
+ * @aeq: aeq structure ptr
+ */
+static enum i40iw_status_code i40iw_sc_aeq_create_done(struct i40iw_sc_aeq *aeq)
+{
+       struct i40iw_sc_cqp *cqp;
+
+       cqp = aeq->dev->cqp;
+       return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_AEQ, NULL);
+}
+
+/**
+ * i40iw_sc_aeq_destroy_done - poll for aeq destroy cqp op to complete during close
+ * @aeq: aeq structure ptr
+ */
+static enum i40iw_status_code i40iw_sc_aeq_destroy_done(struct i40iw_sc_aeq *aeq)
+{
+       struct i40iw_sc_cqp *cqp;
+
+       cqp = aeq->dev->cqp;
+       return  i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_DESTROY_AEQ, NULL);
+}
+
+/**
+ * i40iw_sc_ccq_init - initialize control cq
+ * @cq: sc's cq struct
+ * @info: info for control cq initialization
+ */
+static enum i40iw_status_code i40iw_sc_ccq_init(struct i40iw_sc_cq *cq,
+                                               struct i40iw_ccq_init_info *info)
+{
+       u32 pble_obj_cnt;
+
+       if (info->num_elem < I40IW_MIN_CQ_SIZE || info->num_elem > I40IW_MAX_CQ_SIZE)
+               return I40IW_ERR_INVALID_SIZE;
+
+       if (info->ceq_id > I40IW_MAX_CEQID)
+               return I40IW_ERR_INVALID_CEQ_ID;
+
+       pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
+
+       if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
+               return I40IW_ERR_INVALID_PBLE_INDEX;
+
+       cq->cq_pa = info->cq_pa;
+       cq->cq_uk.cq_base = info->cq_base;
+       cq->shadow_area_pa = info->shadow_area_pa;
+       cq->cq_uk.shadow_area = info->shadow_area;
+       cq->shadow_read_threshold = info->shadow_read_threshold;
+       cq->dev = info->dev;
+       cq->ceq_id = info->ceq_id;
+       cq->cq_uk.cq_size = info->num_elem;
+       cq->cq_type = I40IW_CQ_TYPE_CQP;
+       cq->ceqe_mask = info->ceqe_mask;
+       I40IW_RING_INIT(cq->cq_uk.cq_ring, info->num_elem);
+
+       cq->cq_uk.cq_id = 0;    /* control cq is id 0 always */
+       cq->ceq_id_valid = info->ceq_id_valid;
+       cq->tph_en = info->tph_en;
+       cq->tph_val = info->tph_val;
+       cq->cq_uk.avoid_mem_cflct = info->avoid_mem_cflct;
+
+       cq->pbl_list = info->pbl_list;
+       cq->virtual_map = info->virtual_map;
+       cq->pbl_chunk_size = info->pbl_chunk_size;
+       cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
+       cq->cq_uk.polarity = true;
+
+       /* following are only for iw cqs so initialize them to zero */
+       cq->cq_uk.cqe_alloc_reg = NULL;
+       info->dev->ccq = cq;
+       return 0;
+}
+
+/**
+ * i40iw_sc_ccq_create_done - poll cqp for ccq create
+ * @ccq: ccq sc struct
+ */
+static enum i40iw_status_code i40iw_sc_ccq_create_done(struct i40iw_sc_cq *ccq)
+{
+       struct i40iw_sc_cqp *cqp;
+
+       cqp = ccq->dev->cqp;
+       return  i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_CQ, NULL);
+}
+
+/**
+ * i40iw_sc_ccq_create - create control cq
+ * @ccq: ccq sc struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @check_overflow: overflow flag for ccq
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_ccq_create(struct i40iw_sc_cq *ccq,
+                                                 u64 scratch,
+                                                 bool check_overflow,
+                                                 bool post_sq)
+{
+       u64 *wqe;
+       struct i40iw_sc_cqp *cqp;
+       u64 header;
+       enum i40iw_status_code ret_code;
+
+       cqp = ccq->dev->cqp;
+       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       if (!wqe)
+               return I40IW_ERR_RING_FULL;
+       set_64bit_val(wqe, 0, ccq->cq_uk.cq_size);
+       set_64bit_val(wqe, 8, RS_64_1(ccq, 1));
+       set_64bit_val(wqe, 16,
+                     LS_64(ccq->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
+       set_64bit_val(wqe, 32, (ccq->virtual_map ? 0 : ccq->cq_pa));
+       set_64bit_val(wqe, 40, ccq->shadow_area_pa);
+       set_64bit_val(wqe, 48,
+                     (ccq->virtual_map ? ccq->first_pm_pbl_idx : 0));
+       set_64bit_val(wqe, 56,
+                     LS_64(ccq->tph_val, I40IW_CQPSQ_TPHVAL));
+
+       header = ccq->cq_uk.cq_id |
+                LS_64((ccq->ceq_id_valid ? ccq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
+                LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
+                LS_64(ccq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
+                LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
+                LS_64(ccq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
+                LS_64(ccq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
+                LS_64(ccq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
+                LS_64(ccq->tph_en, I40IW_CQPSQ_TPHEN) |
+                LS_64(ccq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
+                LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+       i40iw_insert_wqe_hdr(wqe, header);
+
+       i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CCQ_CREATE WQE",
+                       wqe, I40IW_CQP_WQE_SIZE * 8);
+
+       if (post_sq) {
+               i40iw_sc_cqp_post_sq(cqp);
+               ret_code = i40iw_sc_ccq_create_done(ccq);
+               if (ret_code)
+                       return ret_code;
+       }
+       cqp->process_cqp_sds = i40iw_cqp_sds_cmd;
+
+       return 0;
+}
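
Note the last assignment in i40iw_sc_ccq_create(): once the control CQ exists, cqp->process_cqp_sds is repointed at i40iw_cqp_sds_cmd, and i40iw_sc_cceq_destroy_done() above swaps it back to i40iw_update_sds_noccq when the control CEQ goes away, so SD updates always take whichever completion path is currently available. A minimal stand-alone sketch of that swap-the-strategy-pointer pattern (names are invented for the sketch):

#include <stdio.h>

struct dev;
typedef int (*sds_fn)(struct dev *d);

struct dev {
    sds_fn process_sds;   /* strategy pointer, like cqp->process_cqp_sds */
    int ccq_up;
};

static int sds_via_ccq(struct dev *d)  { (void)d; puts("SD update via CCQ"); return 0; }
static int sds_no_ccq(struct dev *d)   { (void)d; puts("SD update, no CCQ"); return 0; }

static void ccq_create(struct dev *d)   { d->ccq_up = 1; d->process_sds = sds_via_ccq; }
static void ccq_teardown(struct dev *d) { d->ccq_up = 0; d->process_sds = sds_no_ccq; }

int main(void)
{
    struct dev d = { .process_sds = sds_no_ccq };

    d.process_sds(&d);      /* before the control CQ exists */
    ccq_create(&d);
    d.process_sds(&d);      /* now routed through the CCQ path */
    ccq_teardown(&d);
    d.process_sds(&d);
    return 0;
}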
+
+/**
+ * i40iw_sc_ccq_destroy - destroy ccq during close
+ * @ccq: ccq sc struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_ccq_destroy(struct i40iw_sc_cq *ccq,
+                                                  u64 scratch,
+                                                  bool post_sq)
+{
+       struct i40iw_sc_cqp *cqp;
+       u64 *wqe;
+       u64 header;
+       enum i40iw_status_code ret_code = 0;
+       u32 tail, val, error;
+
+       cqp = ccq->dev->cqp;
+       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       if (!wqe)
+               return I40IW_ERR_RING_FULL;
+       set_64bit_val(wqe, 0, ccq->cq_uk.cq_size);
+       set_64bit_val(wqe, 8, RS_64_1(ccq, 1));
+       set_64bit_val(wqe, 40, ccq->shadow_area_pa);
+
+       header = ccq->cq_uk.cq_id |
+                LS_64((ccq->ceq_id_valid ? ccq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
+                LS_64(I40IW_CQP_OP_DESTROY_CQ, I40IW_CQPSQ_OPCODE) |
+                LS_64(ccq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
+                LS_64(ccq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
+                LS_64(ccq->tph_en, I40IW_CQPSQ_TPHEN) |
+                LS_64(ccq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
+                LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+       i40iw_insert_wqe_hdr(wqe, header);
+
+       i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CCQ_DESTROY WQE",
+                       wqe, I40IW_CQP_WQE_SIZE * 8);
+
+       i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
+       if (error)
+               return I40IW_ERR_CQP_COMPL_ERROR;
+
+       if (post_sq) {
+               i40iw_sc_cqp_post_sq(cqp);
+               ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000);
+       }
+
+       return ret_code;
+}
+
+/**
+ * i40iw_sc_cq_init - initialize completion q
+ * @cq: cq struct
+ * @info: cq initialization info
+ */
+static enum i40iw_status_code i40iw_sc_cq_init(struct i40iw_sc_cq *cq,
+                                              struct i40iw_cq_init_info *info)
+{
+       u32 __iomem *cqe_alloc_reg = NULL;
+       enum i40iw_status_code ret_code;
+       u32 pble_obj_cnt;
+       u32 arm_offset;
+
+       pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
+
+       if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
+               return I40IW_ERR_INVALID_PBLE_INDEX;
+
+       cq->cq_pa = info->cq_base_pa;
+       cq->dev = info->dev;
+       cq->ceq_id = info->ceq_id;
+       arm_offset = (info->dev->is_pf) ? I40E_PFPE_CQARM : I40E_VFPE_CQARM1;
+       if (i40iw_get_hw_addr(cq->dev))
+               cqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(cq->dev) +
+                                             arm_offset);
+       info->cq_uk_init_info.cqe_alloc_reg = cqe_alloc_reg;
+       ret_code = i40iw_cq_uk_init(&cq->cq_uk, &info->cq_uk_init_info);
+       if (ret_code)
+               return ret_code;
+       cq->virtual_map = info->virtual_map;
+       cq->pbl_chunk_size = info->pbl_chunk_size;
+       cq->ceqe_mask = info->ceqe_mask;
+       cq->cq_type = (info->type) ? info->type : I40IW_CQ_TYPE_IWARP;
+
+       cq->shadow_area_pa = info->shadow_area_pa;
+       cq->shadow_read_threshold = info->shadow_read_threshold;
+
+       cq->ceq_id_valid = info->ceq_id_valid;
+       cq->tph_en = info->tph_en;
+       cq->tph_val = info->tph_val;
+
+       cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
+
+       return 0;
+}
+
+/**
+ * i40iw_sc_cq_create - create completion q
+ * @cq: cq struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @check_overflow: flag for overflow check
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_cq_create(struct i40iw_sc_cq *cq,
+                                                u64 scratch,
+                                                bool check_overflow,
+                                                bool post_sq)
+{
+       u64 *wqe;
+       struct i40iw_sc_cqp *cqp;
+       u64 header;
+
+       if (cq->cq_uk.cq_id > I40IW_MAX_CQID)
+               return I40IW_ERR_INVALID_CQ_ID;
+
+       if (cq->ceq_id > I40IW_MAX_CEQID)
+               return I40IW_ERR_INVALID_CEQ_ID;
+
+       cqp = cq->dev->cqp;
+       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       if (!wqe)
+               return I40IW_ERR_RING_FULL;
+
+       set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
+       set_64bit_val(wqe, 8, RS_64_1(cq, 1));
+       set_64bit_val(wqe,
+                     16,
+                     LS_64(cq->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
+
+       set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa));
+
+       set_64bit_val(wqe, 40, cq->shadow_area_pa);
+       set_64bit_val(wqe, 48, (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
+       set_64bit_val(wqe, 56, LS_64(cq->tph_val, I40IW_CQPSQ_TPHVAL));
+
+       header = cq->cq_uk.cq_id |
+                LS_64((cq->ceq_id_valid ? cq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
+                LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
+                LS_64(cq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
+                LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
+                LS_64(cq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
+                LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
+                LS_64(cq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
+                LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |
+                LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
+                LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+       i40iw_insert_wqe_hdr(wqe, header);
+
+       i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_CREATE WQE",
+                       wqe, I40IW_CQP_WQE_SIZE * 8);
+
+       if (post_sq)
+               i40iw_sc_cqp_post_sq(cqp);
+       return 0;
+}
+
+/**
+ * i40iw_sc_cq_destroy - destroy completion q
+ * @cq: cq struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_cq_destroy(struct i40iw_sc_cq *cq,
+                                                 u64 scratch,
+                                                 bool post_sq)
+{
+       struct i40iw_sc_cqp *cqp;
+       u64 *wqe;
+       u64 header;
+
+       cqp = cq->dev->cqp;
+       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       if (!wqe)
+               return I40IW_ERR_RING_FULL;
+       set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
+       set_64bit_val(wqe, 8, RS_64_1(cq, 1));
+       set_64bit_val(wqe, 40, cq->shadow_area_pa);
+       set_64bit_val(wqe, 48, (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
+
+       header = cq->cq_uk.cq_id |
+                LS_64((cq->ceq_id_valid ? cq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
+                LS_64(I40IW_CQP_OP_DESTROY_CQ, I40IW_CQPSQ_OPCODE) |
+                LS_64(cq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
+                LS_64(cq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
+                LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
+                LS_64(cq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
+                LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |
+                LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
+                LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+       i40iw_insert_wqe_hdr(wqe, header);
+
+       i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_DESTROY WQE",
+                       wqe, I40IW_CQP_WQE_SIZE * 8);
+
+       if (post_sq)
+               i40iw_sc_cqp_post_sq(cqp);
+       return 0;
+}
+
+/**
+ * i40iw_sc_cq_modify - modify a completion queue
+ * @cq: cq struct
+ * @info: modification info struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_cq_modify(struct i40iw_sc_cq *cq,
+                                                struct i40iw_modify_cq_info *info,
+                                                u64 scratch,
+                                                bool post_sq)
+{
+       struct i40iw_sc_cqp *cqp;
+       u64 *wqe;
+       u64 header;
+       u32 cq_size, ceq_id, first_pm_pbl_idx;
+       u8 pbl_chunk_size;
+       bool virtual_map, ceq_id_valid, check_overflow;
+       u32 pble_obj_cnt;
+
+       if (info->ceq_valid && (info->ceq_id > I40IW_MAX_CEQID))
+               return I40IW_ERR_INVALID_CEQ_ID;
+
+       pble_obj_cnt = cq->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
+
+       if (info->cq_resize && info->virtual_map &&
+           (info->first_pm_pbl_idx >= pble_obj_cnt))
+               return I40IW_ERR_INVALID_PBLE_INDEX;
+
+       cqp = cq->dev->cqp;
+       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       if (!wqe)
+               return I40IW_ERR_RING_FULL;
+
+       cq->pbl_list = info->pbl_list;
+       cq->cq_pa = info->cq_pa;
+       cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
+
+       cq_size = info->cq_resize ? info->cq_size : cq->cq_uk.cq_size;
+       if (info->ceq_change) {
+               ceq_id_valid = true;
+               ceq_id = info->ceq_id;
+       } else {
+               ceq_id_valid = cq->ceq_id_valid;
+               ceq_id = ceq_id_valid ? cq->ceq_id : 0;
+       }
+       virtual_map = info->cq_resize ? info->virtual_map : cq->virtual_map;
+       first_pm_pbl_idx = (info->cq_resize ?
+                           (info->virtual_map ? info->first_pm_pbl_idx : 0) :
+                           (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
+       pbl_chunk_size = (info->cq_resize ?
+                         (info->virtual_map ? info->pbl_chunk_size : 0) :
+                         (cq->virtual_map ? cq->pbl_chunk_size : 0));
+       check_overflow = info->check_overflow_change ? info->check_overflow :
+                        cq->check_overflow;
+       cq->cq_uk.cq_size = cq_size;
+       cq->ceq_id_valid = ceq_id_valid;
+       cq->ceq_id = ceq_id;
+       cq->virtual_map = virtual_map;
+       cq->first_pm_pbl_idx = first_pm_pbl_idx;
+       cq->pbl_chunk_size = pbl_chunk_size;
+       cq->check_overflow = check_overflow;
+
+       set_64bit_val(wqe, 0, cq_size);
+       set_64bit_val(wqe, 8, RS_64_1(cq, 1));
+       set_64bit_val(wqe, 16,
+                     LS_64(info->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
+       set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa));
+       set_64bit_val(wqe, 40, cq->shadow_area_pa);
+       set_64bit_val(wqe, 48, (cq->virtual_map ? first_pm_pbl_idx : 0));
+       set_64bit_val(wqe, 56, LS_64(cq->tph_val, I40IW_CQPSQ_TPHVAL));
+
+       header = cq->cq_uk.cq_id |
+                LS_64(ceq_id, I40IW_CQPSQ_CQ_CEQID) |
+                LS_64(I40IW_CQP_OP_MODIFY_CQ, I40IW_CQPSQ_OPCODE) |
+                LS_64(info->cq_resize, I40IW_CQPSQ_CQ_CQRESIZE) |
+                LS_64(pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
+                LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
+                LS_64(virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
+                LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
+                LS_64(ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
+                LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |
+                LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
+                LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+       i40iw_insert_wqe_hdr(wqe, header);
+
+       i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_MODIFY WQE",
+                       wqe, I40IW_CQP_WQE_SIZE * 8);
+
+       if (post_sq)
+               i40iw_sc_cqp_post_sq(cqp);
+       return 0;
+}
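
i40iw_sc_cq_modify() above only takes the new size, map and PBL parameters from info when info->cq_resize (or the matching *_change flag such as ceq_change or check_overflow_change) is set; otherwise it re-encodes the CQ's current values into the WQE. A tiny stand-alone sketch of that take-new-only-when-flagged selection:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct cq_state   { uint32_t size; bool virtual_map; };
struct modify_req { bool resize; uint32_t size; bool virtual_map; };

/* Apply only the fields the caller flagged; keep everything else as-is. */
static void cq_modify(struct cq_state *cq, const struct modify_req *req)
{
    uint32_t size = req->resize ? req->size : cq->size;
    bool vmap     = req->resize ? req->virtual_map : cq->virtual_map;

    cq->size = size;
    cq->virtual_map = vmap;
}

int main(void)
{
    struct cq_state cq = { .size = 256, .virtual_map = false };
    struct modify_req req = { .resize = true, .size = 1024, .virtual_map = true };

    cq_modify(&cq, &req);
    printf("size=%u vmap=%d\n", cq.size, cq.virtual_map);
    return 0;
}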
+
+/**
+ * i40iw_sc_qp_init - initialize qp
+ * @qp: sc qp
+ * @info: initialization qp info
+ */
+static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp,
+                                              struct i40iw_qp_init_info *info)
+{
+       u32 __iomem *wqe_alloc_reg = NULL;
+       enum i40iw_status_code ret_code;
+       u32 pble_obj_cnt;
+       u8 wqe_size;
+       u32 offset;
+
+       qp->dev = info->pd->dev;
+       qp->sq_pa = info->sq_pa;
+       qp->rq_pa = info->rq_pa;
+       qp->hw_host_ctx_pa = info->host_ctx_pa;
+       qp->q2_pa = info->q2_pa;
+       qp->shadow_area_pa = info->shadow_area_pa;
+
+       qp->q2_buf = info->q2;
+       qp->pd = info->pd;
+       qp->hw_host_ctx = info->host_ctx;
+       offset = (qp->pd->dev->is_pf) ? I40E_PFPE_WQEALLOC : I40E_VFPE_WQEALLOC1;
+       if (i40iw_get_hw_addr(qp->pd->dev))
+               wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
+                                             offset);
+
+       info->qp_uk_init_info.wqe_alloc_reg = wqe_alloc_reg;
+       ret_code = i40iw_qp_uk_init(&qp->qp_uk, &info->qp_uk_init_info);
+       if (ret_code)
+               return ret_code;
+       qp->virtual_map = info->virtual_map;
+
+       pble_obj_cnt = info->pd->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
+
+       if ((info->virtual_map && (info->sq_pa >= pble_obj_cnt)) ||
+           (info->virtual_map && (info->rq_pa >= pble_obj_cnt)))
+               return I40IW_ERR_INVALID_PBLE_INDEX;
+
+       qp->llp_stream_handle = (void *)(-1);
+       qp->qp_type = (info->type) ? info->type : I40IW_QP_TYPE_IWARP;
+
+       qp->hw_sq_size = i40iw_get_encoded_wqe_size(qp->qp_uk.sq_ring.size,
+                                                   false);
+       i40iw_debug(qp->dev, I40IW_DEBUG_WQE, "%s: hw_sq_size[%04d] sq_ring.size[%04d]\n",
+                   __func__, qp->hw_sq_size, qp->qp_uk.sq_ring.size);
+       ret_code = i40iw_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt,
+                                              &wqe_size);
+       if (ret_code)
+               return ret_code;
+       qp->hw_rq_size = i40iw_get_encoded_wqe_size(qp->qp_uk.rq_size *
+                               (wqe_size / I40IW_QP_WQE_MIN_SIZE), false);
+       i40iw_debug(qp->dev, I40IW_DEBUG_WQE,
+                   "%s: hw_rq_size[%04d] qp_uk.rq_size[%04d] wqe_size[%04d]\n",
+                   __func__, qp->hw_rq_size, qp->qp_uk.rq_size, wqe_size);
+       qp->sq_tph_val = info->sq_tph_val;
+       qp->rq_tph_val = info->rq_tph_val;
+       qp->sq_tph_en = info->sq_tph_en;
+       qp->rq_tph_en = info->rq_tph_en;
+       qp->rcv_tph_en = info->rcv_tph_en;
+       qp->xmit_tph_en = info->xmit_tph_en;
+       qp->qs_handle = qp->pd->dev->qs_handle;
+       qp->exception_lan_queue = qp->pd->dev->exception_lan_queue;
+
+       return 0;
+}
+
+/**
+ * i40iw_sc_qp_create - create qp
+ * @qp: sc qp
+ * @info: qp create info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_qp_create(
+                               struct i40iw_sc_qp *qp,
+                               struct i40iw_create_qp_info *info,
+                               u64 scratch,
+                               bool post_sq)
+{
+       struct i40iw_sc_cqp *cqp;
+       u64 *wqe;
+       u64 header;
+
+       if ((qp->qp_uk.qp_id < I40IW_MIN_IW_QP_ID) ||
+           (qp->qp_uk.qp_id > I40IW_MAX_IW_QP_ID))
+               return I40IW_ERR_INVALID_QP_ID;
+
+       cqp = qp->pd->dev->cqp;
+       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       if (!wqe)
+               return I40IW_ERR_RING_FULL;
+
+       set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
+
+       set_64bit_val(wqe, 40, qp->shadow_area_pa);
+
+       header = qp->qp_uk.qp_id |
+                LS_64(I40IW_CQP_OP_CREATE_QP, I40IW_CQPSQ_OPCODE) |
+                LS_64((info->ord_valid ? 1 : 0), I40IW_CQPSQ_QP_ORDVALID) |
+                LS_64(info->tcp_ctx_valid, I40IW_CQPSQ_QP_TOECTXVALID) |
+                LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
+                LS_64(qp->virtual_map, I40IW_CQPSQ_QP_VQ) |
+                LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
+                LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) |
+                LS_64(info->arp_cache_idx_valid, I40IW_CQPSQ_QP_ARPTABIDXVALID) |
+                LS_64(info->next_iwarp_state, I40IW_CQPSQ_QP_NEXTIWSTATE) |
+                LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+       i40iw_insert_wqe_hdr(wqe, header);
+       i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_CREATE WQE",
+                       wqe, I40IW_CQP_WQE_SIZE * 8);
+
+       if (post_sq)
+               i40iw_sc_cqp_post_sq(cqp);
+       return 0;
+}
+
+/**
+ * i40iw_sc_qp_modify - modify qp cqp wqe
+ * @qp: sc qp
+ * @info: modify qp info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_qp_modify(
+                               struct i40iw_sc_qp *qp,
+                               struct i40iw_modify_qp_info *info,
+                               u64 scratch,
+                               bool post_sq)
+{
+       u64 *wqe;
+       struct i40iw_sc_cqp *cqp;
+       u64 header;
+       u8 term_actions = 0;
+       u8 term_len = 0;
+
+       cqp = qp->pd->dev->cqp;
+       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       if (!wqe)
+               return I40IW_ERR_RING_FULL;
+       if (info->next_iwarp_state == I40IW_QP_STATE_TERMINATE) {
+               if (info->dont_send_fin)
+                       term_actions += I40IWQP_TERM_SEND_TERM_ONLY;
+               if (info->dont_send_term)
+                       term_actions += I40IWQP_TERM_SEND_FIN_ONLY;
+               if ((term_actions == I40IWQP_TERM_SEND_TERM_AND_FIN) ||
+                   (term_actions == I40IWQP_TERM_SEND_TERM_ONLY))
+                       term_len = info->termlen;
+       }
+
+       set_64bit_val(wqe,
+                     8,
+                     LS_64(info->new_mss, I40IW_CQPSQ_QP_NEWMSS) |
+                     LS_64(term_len, I40IW_CQPSQ_QP_TERMLEN));
+
+       set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
+       set_64bit_val(wqe, 40, qp->shadow_area_pa);
+
+       header = qp->qp_uk.qp_id |
+                LS_64(I40IW_CQP_OP_MODIFY_QP, I40IW_CQPSQ_OPCODE) |
+                LS_64(info->ord_valid, I40IW_CQPSQ_QP_ORDVALID) |
+                LS_64(info->tcp_ctx_valid, I40IW_CQPSQ_QP_TOECTXVALID) |
+                LS_64(info->cached_var_valid, I40IW_CQPSQ_QP_CACHEDVARVALID) |
+                LS_64(qp->virtual_map, I40IW_CQPSQ_QP_VQ) |
+                LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
+                LS_64(info->force_loopback, I40IW_CQPSQ_QP_FORCELOOPBACK) |
+                LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
+                LS_64(info->mss_change, I40IW_CQPSQ_QP_MSSCHANGE) |
+                LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) |
+                LS_64(info->remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) |
+                LS_64(term_actions, I40IW_CQPSQ_QP_TERMACT) |
+                LS_64(info->reset_tcp_conn, I40IW_CQPSQ_QP_RESETCON) |
+                LS_64(info->arp_cache_idx_valid, I40IW_CQPSQ_QP_ARPTABIDXVALID) |
+                LS_64(info->next_iwarp_state, I40IW_CQPSQ_QP_NEXTIWSTATE) |
+                LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+       i40iw_insert_wqe_hdr(wqe, header);
+
+       i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_MODIFY WQE",
+                       wqe, I40IW_CQP_WQE_SIZE * 8);
+
+       if (post_sq)
+               i40iw_sc_cqp_post_sq(cqp);
+       return 0;
+}
+
+/**
+ * i40iw_sc_qp_destroy - cqp destroy qp
+ * @qp: sc qp
+ * @scratch: u64 saved to be used during cqp completion
+ * @remove_hash_idx: flag whether to remove hash idx
+ * @ignore_mw_bnd: memory window bind flag
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_qp_destroy(
+                                       struct i40iw_sc_qp *qp,
+                                       u64 scratch,
+                                       bool remove_hash_idx,
+                                       bool ignore_mw_bnd,
+                                       bool post_sq)
+{
+       u64 *wqe;
+       struct i40iw_sc_cqp *cqp;
+       u64 header;
+
+       cqp = qp->pd->dev->cqp;
+       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       if (!wqe)
+               return I40IW_ERR_RING_FULL;
+       set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
+       set_64bit_val(wqe, 40, qp->shadow_area_pa);
+
+       header = qp->qp_uk.qp_id |
+                LS_64(I40IW_CQP_OP_DESTROY_QP, I40IW_CQPSQ_OPCODE) |
+                LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
+                LS_64(ignore_mw_bnd, I40IW_CQPSQ_QP_IGNOREMWBOUND) |
+                LS_64(remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) |
+                LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+       i40iw_insert_wqe_hdr(wqe, header);
+       i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_DESTROY WQE",
+                       wqe, I40IW_CQP_WQE_SIZE * 8);
+
+       if (post_sq)
+               i40iw_sc_cqp_post_sq(cqp);
+       return 0;
+}
+
+/**
+ * i40iw_sc_qp_flush_wqes - flush qp's wqe
+ * @qp: sc qp
+ * @info: flush information
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_qp_flush_wqes(
+                               struct i40iw_sc_qp *qp,
+                               struct i40iw_qp_flush_info *info,
+                               u64 scratch,
+                               bool post_sq)
+{
+       u64 temp = 0;
+       u64 *wqe;
+       struct i40iw_sc_cqp *cqp;
+       u64 header;
+       bool flush_sq = false, flush_rq = false;
+
+       if (info->rq && !qp->flush_rq)
+               flush_rq = true;
+
+       if (info->sq && !qp->flush_sq)
+               flush_sq = true;
+
+       qp->flush_sq |= flush_sq;
+       qp->flush_rq |= flush_rq;
+       if (!flush_sq && !flush_rq) {
+               if (info->ae_code != I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR)
+                       return 0;
+       }
+
+       cqp = qp->pd->dev->cqp;
+       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       if (!wqe)
+               return I40IW_ERR_RING_FULL;
+       if (info->userflushcode) {
+               if (flush_rq) {
+                       temp |= LS_64(info->rq_minor_code, I40IW_CQPSQ_FWQE_RQMNERR) |
+                               LS_64(info->rq_major_code, I40IW_CQPSQ_FWQE_RQMJERR);
+               }
+               if (flush_sq) {
+                       temp |= LS_64(info->sq_minor_code, I40IW_CQPSQ_FWQE_SQMNERR) |
+                               LS_64(info->sq_major_code, I40IW_CQPSQ_FWQE_SQMJERR);
+               }
+       }
+       set_64bit_val(wqe, 16, temp);
+
+       temp = (info->generate_ae) ?
+               info->ae_code | LS_64(info->ae_source, I40IW_CQPSQ_FWQE_AESOURCE) : 0;
+
+       set_64bit_val(wqe, 8, temp);
+
+       header = qp->qp_uk.qp_id |
+                LS_64(I40IW_CQP_OP_FLUSH_WQES, I40IW_CQPSQ_OPCODE) |
+                LS_64(info->generate_ae, I40IW_CQPSQ_FWQE_GENERATE_AE) |
+                LS_64(info->userflushcode, I40IW_CQPSQ_FWQE_USERFLCODE) |
+                LS_64(flush_sq, I40IW_CQPSQ_FWQE_FLUSHSQ) |
+                LS_64(flush_rq, I40IW_CQPSQ_FWQE_FLUSHRQ) |
+                LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+       i40iw_insert_wqe_hdr(wqe, header);
+
+       i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_FLUSH WQE",
+                       wqe, I40IW_CQP_WQE_SIZE * 8);
+
+       if (post_sq)
+               i40iw_sc_cqp_post_sq(cqp);
+       return 0;
+}
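
The flush bookkeeping above posts a flush WQE for each queue at most once: a repeat request falls through without touching the ring unless the MPA CRC error AE path still needs servicing. A minimal stand-alone sketch of that sticky-flag logic, with field names only loosely mirroring the driver's:

#include <stdbool.h>
#include <stdio.h>

struct demo_qp {
	bool flush_sq;	/* sticky: SQ flush already issued */
	bool flush_rq;	/* sticky: RQ flush already issued */
};

/* returns true when a flush WQE would actually be posted */
static bool demo_flush(struct demo_qp *qp, bool want_sq, bool want_rq)
{
	bool flush_sq = want_sq && !qp->flush_sq;
	bool flush_rq = want_rq && !qp->flush_rq;

	qp->flush_sq |= flush_sq;
	qp->flush_rq |= flush_rq;

	return flush_sq || flush_rq;
}

int main(void)
{
	struct demo_qp qp = { false, false };

	printf("first flush posts WQE:  %d\n", demo_flush(&qp, true, true));	/* 1 */
	printf("second flush posts WQE: %d\n", demo_flush(&qp, true, true));	/* 0 */
	return 0;
}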
+
+/**
+ * i40iw_sc_qp_upload_context - upload qp's context
+ * @dev: sc device struct
+ * @info: upload context info ptr for return
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_qp_upload_context(
+                                       struct i40iw_sc_dev *dev,
+                                       struct i40iw_upload_context_info *info,
+                                       u64 scratch,
+                                       bool post_sq)
+{
+       u64 *wqe;
+       struct i40iw_sc_cqp *cqp;
+       u64 header;
+
+       cqp = dev->cqp;
+       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       if (!wqe)
+               return I40IW_ERR_RING_FULL;
+       set_64bit_val(wqe, 16, info->buf_pa);
+
+       header = LS_64(info->qp_id, I40IW_CQPSQ_UCTX_QPID) |
+                LS_64(I40IW_CQP_OP_UPLOAD_CONTEXT, I40IW_CQPSQ_OPCODE) |
+                LS_64(info->qp_type, I40IW_CQPSQ_UCTX_QPTYPE) |
+                LS_64(info->raw_format, I40IW_CQPSQ_UCTX_RAWFORMAT) |
+                LS_64(info->freeze_qp, I40IW_CQPSQ_UCTX_FREEZEQP) |
+                LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+       i40iw_insert_wqe_hdr(wqe, header);
+
+       i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "QP_UPLOAD_CTX WQE",
+                       wqe, I40IW_CQP_WQE_SIZE * 8);
+
+       if (post_sq)
+               i40iw_sc_cqp_post_sq(cqp);
+       return 0;
+}
+
+/**
+ * i40iw_sc_qp_setctx - set qp's context
+ * @qp: sc qp
+ * @qp_ctx: context ptr
+ * @info: ctx info
+ */
+static enum i40iw_status_code i40iw_sc_qp_setctx(
+                               struct i40iw_sc_qp *qp,
+                               u64 *qp_ctx,
+                               struct i40iw_qp_host_ctx_info *info)
+{
+       struct i40iwarp_offload_info *iw;
+       struct i40iw_tcp_offload_info *tcp;
+       u64 qw0, qw3, qw7 = 0;
+
+       iw = info->iwarp_info;
+       tcp = info->tcp_info;
+       qw0 = LS_64(qp->qp_uk.rq_wqe_size, I40IWQPC_RQWQESIZE) |
+             LS_64(info->err_rq_idx_valid, I40IWQPC_ERR_RQ_IDX_VALID) |
+             LS_64(qp->rcv_tph_en, I40IWQPC_RCVTPHEN) |
+             LS_64(qp->xmit_tph_en, I40IWQPC_XMITTPHEN) |
+             LS_64(qp->rq_tph_en, I40IWQPC_RQTPHEN) |
+             LS_64(qp->sq_tph_en, I40IWQPC_SQTPHEN) |
+             LS_64(info->push_idx, I40IWQPC_PPIDX) |
+             LS_64(info->push_mode_en, I40IWQPC_PMENA);
+
+       set_64bit_val(qp_ctx, 8, qp->sq_pa);
+       set_64bit_val(qp_ctx, 16, qp->rq_pa);
+
+       qw3 = LS_64(qp->src_mac_addr_idx, I40IWQPC_SRCMACADDRIDX) |
+             LS_64(qp->hw_rq_size, I40IWQPC_RQSIZE) |
+             LS_64(qp->hw_sq_size, I40IWQPC_SQSIZE);
+
+       set_64bit_val(qp_ctx,
+                     128,
+                     LS_64(info->err_rq_idx, I40IWQPC_ERR_RQ_IDX));
+
+       set_64bit_val(qp_ctx,
+                     136,
+                     LS_64(info->send_cq_num, I40IWQPC_TXCQNUM) |
+                     LS_64(info->rcv_cq_num, I40IWQPC_RXCQNUM));
+
+       set_64bit_val(qp_ctx,
+                     168,
+                     LS_64(info->qp_compl_ctx, I40IWQPC_QPCOMPCTX));
+       set_64bit_val(qp_ctx,
+                     176,
+                     LS_64(qp->sq_tph_val, I40IWQPC_SQTPHVAL) |
+                     LS_64(qp->rq_tph_val, I40IWQPC_RQTPHVAL) |
+                     LS_64(qp->qs_handle, I40IWQPC_QSHANDLE) |
+                     LS_64(qp->exception_lan_queue, I40IWQPC_EXCEPTION_LAN_QUEUE));
+
+       if (info->iwarp_info_valid) {
+               qw0 |= LS_64(iw->ddp_ver, I40IWQPC_DDP_VER) |
+                      LS_64(iw->rdmap_ver, I40IWQPC_RDMAP_VER);
+
+               qw7 |= LS_64(iw->pd_id, I40IWQPC_PDIDX);
+               set_64bit_val(qp_ctx, 144, qp->q2_pa);
+               set_64bit_val(qp_ctx,
+                             152,
+                             LS_64(iw->last_byte_sent, I40IWQPC_LASTBYTESENT));
+
+               /*
+                * Hard-code IRD_SIZE to hw-limit, 128, in qpctx, i.e. matching an
+                * advertisable IRD of 64
+                */
+               iw->ird_size = I40IW_QPCTX_ENCD_MAXIRD;
+               set_64bit_val(qp_ctx,
+                             160,
+                             LS_64(iw->ord_size, I40IWQPC_ORDSIZE) |
+                             LS_64(iw->ird_size, I40IWQPC_IRDSIZE) |
+                             LS_64(iw->wr_rdresp_en, I40IWQPC_WRRDRSPOK) |
+                             LS_64(iw->rd_enable, I40IWQPC_RDOK) |
+                             LS_64(iw->snd_mark_en, I40IWQPC_SNDMARKERS) |
+                             LS_64(iw->bind_en, I40IWQPC_BINDEN) |
+                             LS_64(iw->fast_reg_en, I40IWQPC_FASTREGEN) |
+                             LS_64(iw->priv_mode_en, I40IWQPC_PRIVEN) |
+                             LS_64(1, I40IWQPC_IWARPMODE) |
+                             LS_64(iw->rcv_mark_en, I40IWQPC_RCVMARKERS) |
+                             LS_64(iw->align_hdrs, I40IWQPC_ALIGNHDRS) |
+                             LS_64(iw->rcv_no_mpa_crc, I40IWQPC_RCVNOMPACRC) |
+                             LS_64(iw->rcv_mark_offset, I40IWQPC_RCVMARKOFFSET) |
+                             LS_64(iw->snd_mark_offset, I40IWQPC_SNDMARKOFFSET));
+       }
+       if (info->tcp_info_valid) {
+               qw0 |= LS_64(tcp->ipv4, I40IWQPC_IPV4) |
+                      LS_64(tcp->no_nagle, I40IWQPC_NONAGLE) |
+                      LS_64(tcp->insert_vlan_tag, I40IWQPC_INSERTVLANTAG) |
+                      LS_64(tcp->time_stamp, I40IWQPC_TIMESTAMP) |
+                      LS_64(tcp->cwnd_inc_limit, I40IWQPC_LIMIT) |
+                      LS_64(tcp->drop_ooo_seg, I40IWQPC_DROPOOOSEG) |
+                      LS_64(tcp->dup_ack_thresh, I40IWQPC_DUPACK_THRESH);
+
+               qw3 |= LS_64(tcp->ttl, I40IWQPC_TTL) |
+                      LS_64(tcp->src_mac_addr_idx, I40IWQPC_SRCMACADDRIDX) |
+                      LS_64(tcp->avoid_stretch_ack, I40IWQPC_AVOIDSTRETCHACK) |
+                      LS_64(tcp->tos, I40IWQPC_TOS) |
+                      LS_64(tcp->src_port, I40IWQPC_SRCPORTNUM) |
+                      LS_64(tcp->dst_port, I40IWQPC_DESTPORTNUM);
+
+               qp->src_mac_addr_idx = tcp->src_mac_addr_idx;
+               set_64bit_val(qp_ctx,
+                             32,
+                             LS_64(tcp->dest_ip_addr2, I40IWQPC_DESTIPADDR2) |
+                             LS_64(tcp->dest_ip_addr3, I40IWQPC_DESTIPADDR3));
+
+               set_64bit_val(qp_ctx,
+                             40,
+                             LS_64(tcp->dest_ip_addr0, I40IWQPC_DESTIPADDR0) |
+                             LS_64(tcp->dest_ip_addr1, I40IWQPC_DESTIPADDR1));
+
+               set_64bit_val(qp_ctx,
+                             48,
+                             LS_64(tcp->snd_mss, I40IWQPC_SNDMSS) |
+                               LS_64(tcp->vlan_tag, I40IWQPC_VLANTAG) |
+                               LS_64(tcp->arp_idx, I40IWQPC_ARPIDX));
+
+               qw7 |= LS_64(tcp->flow_label, I40IWQPC_FLOWLABEL) |
+                      LS_64(tcp->wscale, I40IWQPC_WSCALE) |
+                      LS_64(tcp->ignore_tcp_opt, I40IWQPC_IGNORE_TCP_OPT) |
+                      LS_64(tcp->ignore_tcp_uns_opt, I40IWQPC_IGNORE_TCP_UNS_OPT) |
+                      LS_64(tcp->tcp_state, I40IWQPC_TCPSTATE) |
+                      LS_64(tcp->rcv_wscale, I40IWQPC_RCVSCALE) |
+                      LS_64(tcp->snd_wscale, I40IWQPC_SNDSCALE);
+
+               set_64bit_val(qp_ctx,
+                             72,
+                             LS_64(tcp->time_stamp_recent, I40IWQPC_TIMESTAMP_RECENT) |
+                             LS_64(tcp->time_stamp_age, I40IWQPC_TIMESTAMP_AGE));
+               set_64bit_val(qp_ctx,
+                             80,
+                             LS_64(tcp->snd_nxt, I40IWQPC_SNDNXT) |
+                             LS_64(tcp->snd_wnd, I40IWQPC_SNDWND));
+
+               set_64bit_val(qp_ctx,
+                             88,
+                             LS_64(tcp->rcv_nxt, I40IWQPC_RCVNXT) |
+                             LS_64(tcp->rcv_wnd, I40IWQPC_RCVWND));
+               set_64bit_val(qp_ctx,
+                             96,
+                             LS_64(tcp->snd_max, I40IWQPC_SNDMAX) |
+                             LS_64(tcp->snd_una, I40IWQPC_SNDUNA));
+               set_64bit_val(qp_ctx,
+                             104,
+                             LS_64(tcp->srtt, I40IWQPC_SRTT) |
+                             LS_64(tcp->rtt_var, I40IWQPC_RTTVAR));
+               set_64bit_val(qp_ctx,
+                             112,
+                             LS_64(tcp->ss_thresh, I40IWQPC_SSTHRESH) |
+                             LS_64(tcp->cwnd, I40IWQPC_CWND));
+               set_64bit_val(qp_ctx,
+                             120,
+                             LS_64(tcp->snd_wl1, I40IWQPC_SNDWL1) |
+                             LS_64(tcp->snd_wl2, I40IWQPC_SNDWL2));
+               set_64bit_val(qp_ctx,
+                             128,
+                             LS_64(tcp->max_snd_window, I40IWQPC_MAXSNDWND) |
+                             LS_64(tcp->rexmit_thresh, I40IWQPC_REXMIT_THRESH));
+               set_64bit_val(qp_ctx,
+                             184,
+                             LS_64(tcp->local_ipaddr3, I40IWQPC_LOCAL_IPADDR3) |
+                             LS_64(tcp->local_ipaddr2, I40IWQPC_LOCAL_IPADDR2));
+               set_64bit_val(qp_ctx,
+                             192,
+                             LS_64(tcp->local_ipaddr1, I40IWQPC_LOCAL_IPADDR1) |
+                             LS_64(tcp->local_ipaddr0, I40IWQPC_LOCAL_IPADDR0));
+       }
+
+       set_64bit_val(qp_ctx, 0, qw0);
+       set_64bit_val(qp_ctx, 24, qw3);
+       set_64bit_val(qp_ctx, 56, qw7);
+
+       i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "QP_HOST_CTX WQE",
+                       qp_ctx, I40IW_QP_CTX_SIZE);
+       return 0;
+}
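
The context writes above all follow one pattern: the QP context is treated as an array of 64-bit words and each value lands at a fixed byte offset (0, 8, 16, 24, ...). The helper below only illustrates that byte-offset-to-qword mapping; the driver's actual set_64bit_val() definition (including any endianness handling) lives in i40iw_osdep.h.

#include <stdint.h>
#include <stdio.h>

/* write a 64-bit value at a byte offset: offset / 8 selects the qword */
static void demo_set_64bit_val(uint64_t *ctx, uint32_t byte_off, uint64_t val)
{
	ctx[byte_off >> 3] = val;
}

int main(void)
{
	uint64_t qp_ctx[32] = { 0 };	/* small stand-in context buffer */

	demo_set_64bit_val(qp_ctx, 8, 0x1000);	/* e.g. SQ physical address */
	demo_set_64bit_val(qp_ctx, 16, 0x2000);	/* e.g. RQ physical address */

	printf("qword1 = 0x%llx, qword2 = 0x%llx\n",
	       (unsigned long long)qp_ctx[1], (unsigned long long)qp_ctx[2]);
	return 0;
}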
+
+/**
+ * i40iw_sc_alloc_stag - mr stag alloc
+ * @dev: sc device struct
+ * @info: stag info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_alloc_stag(
+                               struct i40iw_sc_dev *dev,
+                               struct i40iw_allocate_stag_info *info,
+                               u64 scratch,
+                               bool post_sq)
+{
+       u64 *wqe;
+       struct i40iw_sc_cqp *cqp;
+       u64 header;
+
+       cqp = dev->cqp;
+       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       if (!wqe)
+               return I40IW_ERR_RING_FULL;
+       set_64bit_val(wqe,
+                     8,
+                     LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID) |
+                     LS_64(info->total_len, I40IW_CQPSQ_STAG_STAGLEN));
+       set_64bit_val(wqe,
+                     16,
+                     LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));
+       set_64bit_val(wqe,
+                     40,
+                     LS_64(info->hmc_fcn_index, I40IW_CQPSQ_STAG_HMCFNIDX));
+
+       header = LS_64(I40IW_CQP_OP_ALLOC_STAG, I40IW_CQPSQ_OPCODE) |
+                LS_64(1, I40IW_CQPSQ_STAG_MR) |
+                LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
+                LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) |
+                LS_64(info->page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
+                LS_64(info->remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
+                LS_64(info->use_hmc_fcn_index, I40IW_CQPSQ_STAG_USEHMCFNIDX) |
+                LS_64(info->use_pf_rid, I40IW_CQPSQ_STAG_USEPFRID) |
+                LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+       i40iw_insert_wqe_hdr(wqe, header);
+
+       i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "ALLOC_STAG WQE",
+                       wqe, I40IW_CQP_WQE_SIZE * 8);
+
+       if (post_sq)
+               i40iw_sc_cqp_post_sq(cqp);
+       return 0;
+}
+
+/**
+ * i40iw_sc_mr_reg_non_shared - non-shared mr registration
+ * @dev: sc device struct
+ * @info: mr info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_mr_reg_non_shared(
+                               struct i40iw_sc_dev *dev,
+                               struct i40iw_reg_ns_stag_info *info,
+                               u64 scratch,
+                               bool post_sq)
+{
+       u64 *wqe;
+       u64 temp;
+       struct i40iw_sc_cqp *cqp;
+       u64 header;
+       u32 pble_obj_cnt;
+       bool remote_access;
+       u8 addr_type;
+
+       if (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY |
+                                  I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY))
+               remote_access = true;
+       else
+               remote_access = false;
+
+       pble_obj_cnt = dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
+
+       if (info->chunk_size && (info->first_pm_pbl_index >= pble_obj_cnt))
+               return I40IW_ERR_INVALID_PBLE_INDEX;
+
+       cqp = dev->cqp;
+       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       if (!wqe)
+               return I40IW_ERR_RING_FULL;
+
+       temp = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? (uintptr_t)info->va : info->fbo;
+       set_64bit_val(wqe, 0, temp);
+
+       set_64bit_val(wqe,
+                     8,
+                     LS_64(info->total_len, I40IW_CQPSQ_STAG_STAGLEN) |
+                     LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));
+
+       set_64bit_val(wqe,
+                     16,
+                     LS_64(info->stag_key, I40IW_CQPSQ_STAG_KEY) |
+                     LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));
+       if (!info->chunk_size) {
+               set_64bit_val(wqe, 32, info->reg_addr_pa);
+               set_64bit_val(wqe, 48, 0);
+       } else {
+               set_64bit_val(wqe, 32, 0);
+               set_64bit_val(wqe, 48, info->first_pm_pbl_index);
+       }
+       set_64bit_val(wqe, 40, info->hmc_fcn_index);
+       set_64bit_val(wqe, 56, 0);
+
+       addr_type = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? 1 : 0;
+       header = LS_64(I40IW_CQP_OP_REG_MR, I40IW_CQPSQ_OPCODE) |
+                LS_64(1, I40IW_CQPSQ_STAG_MR) |
+                LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) |
+                LS_64(info->page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
+                LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
+                LS_64(remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
+                LS_64(addr_type, I40IW_CQPSQ_STAG_VABASEDTO) |
+                LS_64(info->use_hmc_fcn_index, I40IW_CQPSQ_STAG_USEHMCFNIDX) |
+                LS_64(info->use_pf_rid, I40IW_CQPSQ_STAG_USEPFRID) |
+                LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+       i40iw_insert_wqe_hdr(wqe, header);
+
+       i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MR_REG_NS WQE",
+                       wqe, I40IW_CQP_WQE_SIZE * 8);
+
+       if (post_sq)
+               i40iw_sc_cqp_post_sq(cqp);
+       return 0;
+}
+
+/**
+ * i40iw_sc_mr_reg_shared - register shared memory region
+ * @dev: sc device struct
+ * @info: info for shared memory registration
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_mr_reg_shared(
+                                       struct i40iw_sc_dev *dev,
+                                       struct i40iw_register_shared_stag *info,
+                                       u64 scratch,
+                                       bool post_sq)
+{
+       u64 *wqe;
+       struct i40iw_sc_cqp *cqp;
+       u64 temp, va64, fbo, header;
+       u32 va32;
+       bool remote_access;
+       u8 addr_type;
+
+       if (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY |
+                                  I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY))
+               remote_access = true;
+       else
+               remote_access = false;
+       cqp = dev->cqp;
+       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       if (!wqe)
+               return I40IW_ERR_RING_FULL;
+       va64 = (uintptr_t)(info->va);
+       va32 = (u32)(va64 & 0x00000000FFFFFFFF);
+       fbo = (u64)(va32 & (4096 - 1));
+
+       set_64bit_val(wqe,
+                     0,
+                     (info->addr_type == I40IW_ADDR_TYPE_VA_BASED ? (uintptr_t)info->va : fbo));
+
+       set_64bit_val(wqe,
+                     8,
+                     LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));
+       temp = LS_64(info->new_stag_key, I40IW_CQPSQ_STAG_KEY) |
+              LS_64(info->new_stag_idx, I40IW_CQPSQ_STAG_IDX) |
+              LS_64(info->parent_stag_idx, I40IW_CQPSQ_STAG_PARENTSTAGIDX);
+       set_64bit_val(wqe, 16, temp);
+
+       addr_type = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? 1 : 0;
+       header = LS_64(I40IW_CQP_OP_REG_SMR, I40IW_CQPSQ_OPCODE) |
+                LS_64(1, I40IW_CQPSQ_STAG_MR) |
+                LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
+                LS_64(remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
+                LS_64(addr_type, I40IW_CQPSQ_STAG_VABASEDTO) |
+                LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+       i40iw_insert_wqe_hdr(wqe, header);
+
+       i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MR_REG_SHARED WQE",
+                       wqe, I40IW_CQP_WQE_SIZE * 8);
+
+       if (post_sq)
+               i40iw_sc_cqp_post_sq(cqp);
+       return 0;
+}
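
For a zero-based (non-VA) registration the WQE above carries the buffer's first byte offset (FBO) within its 4KB page rather than the full address. A tiny worked example of that mask, using a made-up address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uintptr_t va = 0x7f12345678ab;			/* hypothetical user VA */
	uint64_t fbo = (uint64_t)(va & (4096 - 1));	/* offset inside the 4KB page */

	printf("va=0x%llx fbo=0x%llx\n",
	       (unsigned long long)va, (unsigned long long)fbo);
	return 0;
}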
+
+/**
+ * i40iw_sc_dealloc_stag - deallocate stag
+ * @dev: sc device struct
+ * @info: dealloc stag info
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_dealloc_stag(
+                                       struct i40iw_sc_dev *dev,
+                                       struct i40iw_dealloc_stag_info *info,
+                                       u64 scratch,
+                                       bool post_sq)
+{
+       u64 header;
+       u64 *wqe;
+       struct i40iw_sc_cqp *cqp;
+
+       cqp = dev->cqp;
+       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       if (!wqe)
+               return I40IW_ERR_RING_FULL;
+       set_64bit_val(wqe,
+                     8,
+                     LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));
+       set_64bit_val(wqe,
+                     16,
+                     LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));
+
+       header = LS_64(I40IW_CQP_OP_DEALLOC_STAG, I40IW_CQPSQ_OPCODE) |
+                LS_64(info->mr, I40IW_CQPSQ_STAG_MR) |
+                LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+       i40iw_insert_wqe_hdr(wqe, header);
+
+       i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "DEALLOC_STAG WQE",
+                       wqe, I40IW_CQP_WQE_SIZE * 8);
+
+       if (post_sq)
+               i40iw_sc_cqp_post_sq(cqp);
+       return 0;
+}
+
+/**
+ * i40iw_sc_query_stag - query hardware for stag
+ * @dev: sc device struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @stag_index: stag index for query
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_query_stag(struct i40iw_sc_dev *dev,
+                                                 u64 scratch,
+                                                 u32 stag_index,
+                                                 bool post_sq)
+{
+       u64 header;
+       u64 *wqe;
+       struct i40iw_sc_cqp *cqp;
+
+       cqp = dev->cqp;
+       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       if (!wqe)
+               return I40IW_ERR_RING_FULL;
+       set_64bit_val(wqe,
+                     16,
+                     LS_64(stag_index, I40IW_CQPSQ_QUERYSTAG_IDX));
+
+       header = LS_64(I40IW_CQP_OP_QUERY_STAG, I40IW_CQPSQ_OPCODE) |
+                LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+       i40iw_insert_wqe_hdr(wqe, header);
+
+       i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "QUERY_STAG WQE",
+                       wqe, I40IW_CQP_WQE_SIZE * 8);
+
+       if (post_sq)
+               i40iw_sc_cqp_post_sq(cqp);
+       return 0;
+}
+
+/**
+ * i40iw_sc_mw_alloc - mw allocate
+ * @dev: sc device struct
+ * @scratch: u64 saved to be used during cqp completion
+ * @mw_stag_index: stag index
+ * @pd_id: pd id for this mw
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_mw_alloc(
+                                       struct i40iw_sc_dev *dev,
+                                       u64 scratch,
+                                       u32 mw_stag_index,
+                                       u16 pd_id,
+                                       bool post_sq)
+{
+       u64 header;
+       struct i40iw_sc_cqp *cqp;
+       u64 *wqe;
+
+       cqp = dev->cqp;
+       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       if (!wqe)
+               return I40IW_ERR_RING_FULL;
+       set_64bit_val(wqe, 8, LS_64(pd_id, I40IW_CQPSQ_STAG_PDID));
+       set_64bit_val(wqe,
+                     16,
+                     LS_64(mw_stag_index, I40IW_CQPSQ_STAG_IDX));
+
+       header = LS_64(I40IW_CQP_OP_ALLOC_STAG, I40IW_CQPSQ_OPCODE) |
+                LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+       i40iw_insert_wqe_hdr(wqe, header);
+
+       i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MW_ALLOC WQE",
+                       wqe, I40IW_CQP_WQE_SIZE * 8);
+
+       if (post_sq)
+               i40iw_sc_cqp_post_sq(cqp);
+       return 0;
+}
+
+/**
+ * i40iw_sc_send_lsmm - send last streaming mode message
+ * @qp: sc qp struct
+ * @lsmm_buf: buffer with lsmm message
+ * @size: size of lsmm buffer
+ * @stag: stag of lsmm buffer
+ */
+static void i40iw_sc_send_lsmm(struct i40iw_sc_qp *qp,
+                              void *lsmm_buf,
+                              u32 size,
+                              i40iw_stag stag)
+{
+       u64 *wqe;
+       u64 header;
+       struct i40iw_qp_uk *qp_uk;
+
+       qp_uk = &qp->qp_uk;
+       wqe = qp_uk->sq_base->elem;
+
+       set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf);
+
+       set_64bit_val(wqe, 8, (size | LS_64(stag, I40IWQPSQ_FRAG_STAG)));
+
+       set_64bit_val(wqe, 16, 0);
+
+       header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |
+                LS_64(1, I40IWQPSQ_STREAMMODE) |
+                LS_64(1, I40IWQPSQ_WAITFORRCVPDU) |
+                LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
+
+       i40iw_insert_wqe_hdr(wqe, header);
+
+       i40iw_debug_buf(qp->dev, I40IW_DEBUG_QP, "SEND_LSMM WQE",
+                       wqe, I40IW_QP_WQE_MIN_SIZE);
+}
+
+/**
+ * i40iw_sc_send_lsmm_nostag - for privileged qp
+ * @qp: sc qp struct
+ * @lsmm_buf: buffer with lsmm message
+ * @size: size of lsmm buffer
+ */
+static void i40iw_sc_send_lsmm_nostag(struct i40iw_sc_qp *qp,
+                                     void *lsmm_buf,
+                                     u32 size)
+{
+       u64 *wqe;
+       u64 header;
+       struct i40iw_qp_uk *qp_uk;
+
+       qp_uk = &qp->qp_uk;
+       wqe = qp_uk->sq_base->elem;
+
+       set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf);
+
+       set_64bit_val(wqe, 8, size);
+
+       set_64bit_val(wqe, 16, 0);
+
+       header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |
+                LS_64(1, I40IWQPSQ_STREAMMODE) |
+                LS_64(1, I40IWQPSQ_WAITFORRCVPDU) |
+                LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
+
+       i40iw_insert_wqe_hdr(wqe, header);
+
+       i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "SEND_LSMM_NOSTAG WQE",
+                       wqe, I40IW_QP_WQE_MIN_SIZE);
+}
+
+/**
+ * i40iw_sc_send_rtt - send last read0 or write0
+ * @qp: sc qp struct
+ * @read: true to send read0, false to send write0
+ */
+static void i40iw_sc_send_rtt(struct i40iw_sc_qp *qp, bool read)
+{
+       u64 *wqe;
+       u64 header;
+       struct i40iw_qp_uk *qp_uk;
+
+       qp_uk = &qp->qp_uk;
+       wqe = qp_uk->sq_base->elem;
+
+       set_64bit_val(wqe, 0, 0);
+       set_64bit_val(wqe, 8, 0);
+       set_64bit_val(wqe, 16, 0);
+       if (read) {
+               header = LS_64(0x1234, I40IWQPSQ_REMSTAG) |
+                        LS_64(I40IWQP_OP_RDMA_READ, I40IWQPSQ_OPCODE) |
+                        LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
+               set_64bit_val(wqe, 8, ((u64)0xabcd << 32));
+       } else {
+               header = LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) |
+                        LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
+       }
+
+       i40iw_insert_wqe_hdr(wqe, header);
+
+       i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "RTR WQE",
+                       wqe, I40IW_QP_WQE_MIN_SIZE);
+}
+
+/**
+ * i40iw_sc_post_wqe0 - send wqe with opcode
+ * @qp: sc qp struct
+ * @opcode: opcode to use for wqe0
+ */
+static enum i40iw_status_code i40iw_sc_post_wqe0(struct i40iw_sc_qp *qp, u8 opcode)
+{
+       u64 *wqe;
+       u64 header;
+       struct i40iw_qp_uk *qp_uk;
+
+       qp_uk = &qp->qp_uk;
+       wqe = qp_uk->sq_base->elem;
+
+       if (!wqe)
+               return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
+       switch (opcode) {
+       case I40IWQP_OP_NOP:
+               set_64bit_val(wqe, 0, 0);
+               set_64bit_val(wqe, 8, 0);
+               set_64bit_val(wqe, 16, 0);
+               header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
+                        LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
+
+               i40iw_insert_wqe_hdr(wqe, header);
+               break;
+       case I40IWQP_OP_RDMA_SEND:
+               set_64bit_val(wqe, 0, 0);
+               set_64bit_val(wqe, 8, 0);
+               set_64bit_val(wqe, 16, 0);
+               header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |
+                        LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID) |
+                        LS_64(1, I40IWQPSQ_STREAMMODE) |
+                        LS_64(1, I40IWQPSQ_WAITFORRCVPDU);
+
+               i40iw_insert_wqe_hdr(wqe, header);
+               break;
+       default:
+               i40iw_debug(qp->dev, I40IW_DEBUG_QP, "%s: Invalid WQE zero opcode\n",
+                           __func__);
+               break;
+       }
+       return 0;
+}
+
+/**
+ * i40iw_sc_init_iw_hmc() - queries fpm values using cqp and populates hmc_info
+ * @dev: ptr to i40iw_dev struct
+ * @hmc_fn_id: hmc function id
+ */
+enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev, u8 hmc_fn_id)
+{
+       struct i40iw_hmc_info *hmc_info;
+       struct i40iw_dma_mem query_fpm_mem;
+       struct i40iw_virt_mem virt_mem;
+       struct i40iw_vfdev *vf_dev = NULL;
+       u32 mem_size;
+       enum i40iw_status_code ret_code = 0;
+       bool poll_registers = true;
+       u16 iw_vf_idx;
+       u8 wait_type;
+
+       if (hmc_fn_id >= I40IW_MAX_VF_FPM_ID ||
+           (dev->hmc_fn_id != hmc_fn_id && hmc_fn_id < I40IW_FIRST_VF_FPM_ID))
+               return I40IW_ERR_INVALID_HMCFN_ID;
+
+       i40iw_debug(dev, I40IW_DEBUG_HMC, "hmc_fn_id %u, dev->hmc_fn_id %u\n", hmc_fn_id,
+                   dev->hmc_fn_id);
+       if (hmc_fn_id == dev->hmc_fn_id) {
+               hmc_info = dev->hmc_info;
+               query_fpm_mem.pa = dev->fpm_query_buf_pa;
+               query_fpm_mem.va = dev->fpm_query_buf;
+       } else {
+               vf_dev = i40iw_vfdev_from_fpm(dev, hmc_fn_id);
+               if (!vf_dev)
+                       return I40IW_ERR_INVALID_VF_ID;
+
+               hmc_info = &vf_dev->hmc_info;
+               iw_vf_idx = vf_dev->iw_vf_idx;
+               i40iw_debug(dev, I40IW_DEBUG_HMC, "vf_dev %p, hmc_info %p, hmc_obj %p\n", vf_dev,
+                           hmc_info, hmc_info->hmc_obj);
+               if (!vf_dev->fpm_query_buf) {
+                       if (!dev->vf_fpm_query_buf[iw_vf_idx].va) {
+                               ret_code = i40iw_alloc_query_fpm_buf(dev,
+                                                                    &dev->vf_fpm_query_buf[iw_vf_idx]);
+                               if (ret_code)
+                                       return ret_code;
+                       }
+                       vf_dev->fpm_query_buf = dev->vf_fpm_query_buf[iw_vf_idx].va;
+                       vf_dev->fpm_query_buf_pa = dev->vf_fpm_query_buf[iw_vf_idx].pa;
+               }
+               query_fpm_mem.pa = vf_dev->fpm_query_buf_pa;
+               query_fpm_mem.va = vf_dev->fpm_query_buf;
+               /*
+                * This is hardware specific: the PF makes this call on
+                * behalf of the VF, and i40iw_sc_query_fpm_values needs a
+                * ccq poll because the PF ccq is already created.
+                */
+               poll_registers = false;
+       }
+
+       hmc_info->hmc_fn_id = hmc_fn_id;
+
+       if (hmc_fn_id != dev->hmc_fn_id) {
+               ret_code =
+                       i40iw_cqp_query_fpm_values_cmd(dev, &query_fpm_mem, hmc_fn_id);
+       } else {
+               wait_type = poll_registers ? (u8)I40IW_CQP_WAIT_POLL_REGS :
+                           (u8)I40IW_CQP_WAIT_POLL_CQ;
+
+               ret_code = i40iw_sc_query_fpm_values(
+                                       dev->cqp,
+                                       0,
+                                       hmc_info->hmc_fn_id,
+                                       &query_fpm_mem,
+                                       true,
+                                       wait_type);
+       }
+       if (ret_code)
+               return ret_code;
+
+       /* parse the fpm_query_buf and fill hmc obj info */
+       ret_code =
+               i40iw_sc_parse_fpm_query_buf((u64 *)query_fpm_mem.va,
+                                            hmc_info,
+                                            &dev->hmc_fpm_misc);
+       if (ret_code)
+               return ret_code;
+       i40iw_debug_buf(dev, I40IW_DEBUG_HMC, "QUERY FPM BUFFER",
+                       query_fpm_mem.va, I40IW_QUERY_FPM_BUF_SIZE);
+
+       if (hmc_fn_id != dev->hmc_fn_id) {
+               i40iw_cqp_commit_fpm_values_cmd(dev, &query_fpm_mem, hmc_fn_id);
+
+               /* parse the fpm_commit_buf and fill hmc obj info */
+               i40iw_sc_parse_fpm_commit_buf((u64 *)query_fpm_mem.va, hmc_info->hmc_obj);
+               mem_size = sizeof(struct i40iw_hmc_sd_entry) *
+                          (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index);
+               ret_code = i40iw_allocate_virt_mem(dev->hw, &virt_mem, mem_size);
+               if (ret_code)
+                       return ret_code;
+               hmc_info->sd_table.sd_entry = virt_mem.va;
+       }
+
+       /* fill size of objects which are fixed */
+       hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].size = 4;
+       hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].size = 4;
+       hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size = 8;
+       hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].size = 8192;
+       hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].max_cnt = 1;
+
+       return ret_code;
+}
+
+/**
+ * i40iw_sc_configure_iw_fpm() - commits hmc obj cnt values using cqp command and
+ * populates fpm base address in hmc_info
+ * @dev: ptr to i40iw_dev struct
+ * @hmc_fn_id: hmc function id
+ */
+static enum i40iw_status_code i40iw_sc_configure_iw_fpm(struct i40iw_sc_dev *dev,
+                                                       u8 hmc_fn_id)
+{
+       struct i40iw_hmc_info *hmc_info;
+       struct i40iw_hmc_obj_info *obj_info;
+       u64 *buf;
+       struct i40iw_dma_mem commit_fpm_mem;
+       u32 i, j;
+       enum i40iw_status_code ret_code = 0;
+       bool poll_registers = true;
+       u8 wait_type;
+
+       if (hmc_fn_id >= I40IW_MAX_VF_FPM_ID ||
+           (dev->hmc_fn_id != hmc_fn_id && hmc_fn_id < I40IW_FIRST_VF_FPM_ID))
+               return I40IW_ERR_INVALID_HMCFN_ID;
+
+       if (hmc_fn_id == dev->hmc_fn_id) {
+               hmc_info = dev->hmc_info;
+       } else {
+               hmc_info = i40iw_vf_hmcinfo_from_fpm(dev, hmc_fn_id);
+               poll_registers = false;
+       }
+       if (!hmc_info)
+               return I40IW_ERR_BAD_PTR;
+
+       obj_info = hmc_info->hmc_obj;
+       buf = dev->fpm_commit_buf;
+
+       /* copy cnt values in commit buf */
+       for (i = I40IW_HMC_IW_QP, j = 0; i <= I40IW_HMC_IW_PBLE;
+            i++, j += 8)
+               set_64bit_val(buf, j, (u64)obj_info[i].cnt);
+
+       set_64bit_val(buf, 40, 0);   /* APBVT rsvd */
+
+       commit_fpm_mem.pa = dev->fpm_commit_buf_pa;
+       commit_fpm_mem.va = dev->fpm_commit_buf;
+       wait_type = poll_registers ? (u8)I40IW_CQP_WAIT_POLL_REGS :
+                       (u8)I40IW_CQP_WAIT_POLL_CQ;
+       ret_code = i40iw_sc_commit_fpm_values(
+                                       dev->cqp,
+                                       0,
+                                       hmc_info->hmc_fn_id,
+                                       &commit_fpm_mem,
+                                       true,
+                                       wait_type);
+
+       /* parse the fpm_commit_buf and fill hmc obj info */
+       if (!ret_code)
+               ret_code = i40iw_sc_parse_fpm_commit_buf(dev->fpm_commit_buf, hmc_info->hmc_obj);
+
+       i40iw_debug_buf(dev, I40IW_DEBUG_HMC, "COMMIT FPM BUFFER",
+                       commit_fpm_mem.va, I40IW_COMMIT_FPM_BUF_SIZE);
+
+       return ret_code;
+}
+
+/**
+ * cqp_sds_wqe_fill - fill cqp wqe for sd
+ * @cqp: struct for cqp hw
+ * @info: sd info for wqe
+ * @scratch: u64 saved to be used during cqp completion
+ */
+static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp,
+                                              struct i40iw_update_sds_info *info,
+                                              u64 scratch)
+{
+       u64 data;
+       u64 header;
+       u64 *wqe;
+       int mem_entries, wqe_entries;
+       struct i40iw_dma_mem *sdbuf = &cqp->sdbuf;
+
+       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       if (!wqe)
+               return I40IW_ERR_RING_FULL;
+
+       I40IW_CQP_INIT_WQE(wqe);
+       wqe_entries = (info->cnt > 3) ? 3 : info->cnt;
+       mem_entries = info->cnt - wqe_entries;
+
+       header = LS_64(I40IW_CQP_OP_UPDATE_PE_SDS, I40IW_CQPSQ_OPCODE) |
+                LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
+                LS_64(mem_entries, I40IW_CQPSQ_UPESD_ENTRY_COUNT);
+
+       if (mem_entries) {
+               memcpy(sdbuf->va, &info->entry[3], (mem_entries << 4));
+               data = sdbuf->pa;
+       } else {
+               data = 0;
+       }
+       data |= LS_64(info->hmc_fn_id, I40IW_CQPSQ_UPESD_HMCFNID);
+
+       set_64bit_val(wqe, 16, data);
+
+       switch (wqe_entries) {
+       case 3:
+               set_64bit_val(wqe, 48,
+                             (LS_64(info->entry[2].cmd, I40IW_CQPSQ_UPESD_SDCMD) |
+                                       LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID)));
+
+               set_64bit_val(wqe, 56, info->entry[2].data);
+               /* fallthrough */
+       case 2:
+               set_64bit_val(wqe, 32,
+                             (LS_64(info->entry[1].cmd, I40IW_CQPSQ_UPESD_SDCMD) |
+                                       LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID)));
+
+               set_64bit_val(wqe, 40, info->entry[1].data);
+               /* fallthrough */
+       case 1:
+               set_64bit_val(wqe, 0,
+                             LS_64(info->entry[0].cmd, I40IW_CQPSQ_UPESD_SDCMD));
+
+               set_64bit_val(wqe, 8, info->entry[0].data);
+               break;
+       default:
+               break;
+       }
+
+       i40iw_insert_wqe_hdr(wqe, header);
+
+       i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "UPDATE_PE_SDS WQE",
+                       wqe, I40IW_CQP_WQE_SIZE * 8);
+       return 0;
+}
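
cqp_sds_wqe_fill() above splits the SD update between the WQE itself and the side DMA buffer: up to three entries ride inline, the rest are copied out (16 bytes each) and only the buffer address goes into the WQE. A small illustrative calculation, with an assumed count of seven entries:

#include <stdio.h>

int main(void)
{
	int cnt = 7;					/* total SD entries to update */
	int wqe_entries = (cnt > 3) ? 3 : cnt;		/* inline in the WQE */
	int mem_entries = cnt - wqe_entries;		/* spilled to the DMA buffer */

	printf("inline=%d, in sdbuf=%d (copied as %d bytes)\n",
	       wqe_entries, mem_entries, mem_entries << 4);	/* 16 bytes/entry */
	return 0;
}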
+
+/**
+ * i40iw_update_pe_sds - cqp wqe for sd
+ * @dev: ptr to i40iw_dev struct
+ * @info: sd info for sds
+ * @scratch: u64 saved to be used during cqp completion
+ */
+static enum i40iw_status_code i40iw_update_pe_sds(struct i40iw_sc_dev *dev,
+                                                 struct i40iw_update_sds_info *info,
+                                                 u64 scratch)
+{
+       struct i40iw_sc_cqp *cqp = dev->cqp;
+       enum i40iw_status_code ret_code;
+
+       ret_code = cqp_sds_wqe_fill(cqp, info, scratch);
+       if (!ret_code)
+               i40iw_sc_cqp_post_sq(cqp);
+
+       return ret_code;
+}
+
+/**
+ * i40iw_update_sds_noccq - update sd before ccq created
+ * @dev: sc device struct
+ * @info: sd info for sds
+ */
+enum i40iw_status_code i40iw_update_sds_noccq(struct i40iw_sc_dev *dev,
+                                             struct i40iw_update_sds_info *info)
+{
+       u32 error, val, tail;
+       struct i40iw_sc_cqp *cqp = dev->cqp;
+       enum i40iw_status_code ret_code;
+
+       ret_code = cqp_sds_wqe_fill(cqp, info, 0);
+       if (ret_code)
+               return ret_code;
+       i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
+       if (error)
+               return I40IW_ERR_CQP_COMPL_ERROR;
+
+       i40iw_sc_cqp_post_sq(cqp);
+       ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
+
+       return ret_code;
+}
+
+/**
+ * i40iw_sc_suspend_qp - suspend qp for param change
+ * @cqp: struct for cqp hw
+ * @qp: sc qp struct
+ * @scratch: u64 saved to be used during cqp completion
+ */
+enum i40iw_status_code i40iw_sc_suspend_qp(struct i40iw_sc_cqp *cqp,
+                                          struct i40iw_sc_qp *qp,
+                                          u64 scratch)
+{
+       u64 header;
+       u64 *wqe;
+
+       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       if (!wqe)
+               return I40IW_ERR_RING_FULL;
+       header = LS_64(qp->qp_uk.qp_id, I40IW_CQPSQ_SUSPENDQP_QPID) |
+                LS_64(I40IW_CQP_OP_SUSPEND_QP, I40IW_CQPSQ_OPCODE) |
+                LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+       i40iw_insert_wqe_hdr(wqe, header);
+
+       i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "SUSPEND_QP WQE",
+                       wqe, I40IW_CQP_WQE_SIZE * 8);
+
+       i40iw_sc_cqp_post_sq(cqp);
+       return 0;
+}
+
+/**
+ * i40iw_sc_resume_qp - resume qp after suspend
+ * @cqp: struct for cqp hw
+ * @qp: sc qp struct
+ * @scratch: u64 saved to be used during cqp completion
+ */
+enum i40iw_status_code i40iw_sc_resume_qp(struct i40iw_sc_cqp *cqp,
+                                         struct i40iw_sc_qp *qp,
+                                         u64 scratch)
+{
+       u64 header;
+       u64 *wqe;
+
+       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       if (!wqe)
+               return I40IW_ERR_RING_FULL;
+       set_64bit_val(wqe,
+                     16,
+                       LS_64(qp->qs_handle, I40IW_CQPSQ_RESUMEQP_QSHANDLE));
+
+       header = LS_64(qp->qp_uk.qp_id, I40IW_CQPSQ_RESUMEQP_QPID) |
+                LS_64(I40IW_CQP_OP_RESUME_QP, I40IW_CQPSQ_OPCODE) |
+                LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+       i40iw_insert_wqe_hdr(wqe, header);
+
+       i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "RESUME_QP WQE",
+                       wqe, I40IW_CQP_WQE_SIZE * 8);
+
+       i40iw_sc_cqp_post_sq(cqp);
+       return 0;
+}
+
+/**
+ * i40iw_sc_static_hmc_pages_allocated - cqp wqe to allocate hmc pages
+ * @cqp: struct for cqp hw
+ * @scratch: u64 saved to be used during cqp completion
+ * @hmc_fn_id: hmc function id
+ * @post_sq: flag for cqp db to ring
+ * @poll_registers: flag to poll register for cqp completion
+ */
+enum i40iw_status_code i40iw_sc_static_hmc_pages_allocated(
+                                       struct i40iw_sc_cqp *cqp,
+                                       u64 scratch,
+                                       u8 hmc_fn_id,
+                                       bool post_sq,
+                                       bool poll_registers)
+{
+       u64 header;
+       u64 *wqe;
+       u32 tail, val, error;
+       enum i40iw_status_code ret_code = 0;
+
+       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       if (!wqe)
+               return I40IW_ERR_RING_FULL;
+       set_64bit_val(wqe,
+                     16,
+                     LS_64(hmc_fn_id, I40IW_SHMC_PAGE_ALLOCATED_HMC_FN_ID));
+
+       header = LS_64(I40IW_CQP_OP_SHMC_PAGES_ALLOCATED, I40IW_CQPSQ_OPCODE) |
+                LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+       i40iw_insert_wqe_hdr(wqe, header);
+
+       i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "SHMC_PAGES_ALLOCATED WQE",
+                       wqe, I40IW_CQP_WQE_SIZE * 8);
+       i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
+       if (error) {
+               ret_code = I40IW_ERR_CQP_COMPL_ERROR;
+               return ret_code;
+       }
+       if (post_sq) {
+               i40iw_sc_cqp_post_sq(cqp);
+               if (poll_registers)
+                       /* check for cqp sq tail update */
+                       ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000);
+               else
+                       ret_code = i40iw_sc_poll_for_cqp_op_done(cqp,
+                                                                I40IW_CQP_OP_SHMC_PAGES_ALLOCATED,
+                                                                NULL);
+       }
+
+       return ret_code;
+}
+
+/**
+ * i40iw_ring_full - check if cqp ring is full
+ * @cqp: struct for cqp hw
+ */
+static bool i40iw_ring_full(struct i40iw_sc_cqp *cqp)
+{
+       return I40IW_RING_FULL_ERR(cqp->sq_ring);
+}
+
+/**
+ * i40iw_config_fpm_values - configure HMC objects
+ * @dev: sc device struct
+ * @qp_count: desired qp count
+ */
+enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_count)
+{
+       struct i40iw_virt_mem virt_mem;
+       u32 i, mem_size;
+       u32 qpwantedoriginal, qpwanted, mrwanted, pblewanted;
+       u32 powerof2;
+       u64 sd_needed, bytes_needed;
+       u32 loop_count = 0;
+
+       struct i40iw_hmc_info *hmc_info;
+       struct i40iw_hmc_fpm_misc *hmc_fpm_misc;
+       enum i40iw_status_code ret_code = 0;
+
+       hmc_info = dev->hmc_info;
+       hmc_fpm_misc = &dev->hmc_fpm_misc;
+
+       ret_code = i40iw_sc_init_iw_hmc(dev, dev->hmc_fn_id);
+       if (ret_code) {
+               i40iw_debug(dev, I40IW_DEBUG_HMC,
+                           "i40iw_sc_init_iw_hmc returned error_code = %d\n",
+                           ret_code);
+               return ret_code;
+       }
+
+       bytes_needed = 0;
+       for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++) {
+               hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;
+               bytes_needed +=
+                   (hmc_info->hmc_obj[i].max_cnt) * (hmc_info->hmc_obj[i].size);
+               i40iw_debug(dev, I40IW_DEBUG_HMC,
+                           "%s i[%04d] max_cnt[0x%04X] size[0x%04llx]\n",
+                           __func__, i, hmc_info->hmc_obj[i].max_cnt,
+                           hmc_info->hmc_obj[i].size);
+       }
+       sd_needed = (bytes_needed / I40IW_HMC_DIRECT_BP_SIZE) + 1; /* round up */
+       i40iw_debug(dev, I40IW_DEBUG_HMC,
+                   "%s: FW initial max sd_count[%08lld] first_sd_index[%04d]\n",
+                   __func__, sd_needed, hmc_info->first_sd_index);
+       i40iw_debug(dev, I40IW_DEBUG_HMC,
+                   "%s: bytes_needed=0x%llx sd count %d where max sd is %d\n",
+                   __func__, bytes_needed, hmc_info->sd_table.sd_cnt,
+                   hmc_fpm_misc->max_sds);
+
+       qpwanted = min(qp_count, hmc_info->hmc_obj[I40IW_HMC_IW_QP].max_cnt);
+       qpwantedoriginal = qpwanted;
+       mrwanted = hmc_info->hmc_obj[I40IW_HMC_IW_MR].max_cnt;
+       pblewanted = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].max_cnt;
+
+       i40iw_debug(dev, I40IW_DEBUG_HMC,
+                   "req_qp=%d max_sd=%d, max_qp = %d, max_cq=%d, max_mr=%d, max_pble=%d\n",
+                   qp_count, hmc_fpm_misc->max_sds,
+                   hmc_info->hmc_obj[I40IW_HMC_IW_QP].max_cnt,
+                   hmc_info->hmc_obj[I40IW_HMC_IW_CQ].max_cnt,
+                   hmc_info->hmc_obj[I40IW_HMC_IW_MR].max_cnt,
+                   hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].max_cnt);
+
+       do {
+               ++loop_count;
+               hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt = qpwanted;
+               hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt =
+                       min(2 * qpwanted, hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt);
+               hmc_info->hmc_obj[I40IW_HMC_IW_SRQ].cnt = 0x00; /* Reserved */
+               hmc_info->hmc_obj[I40IW_HMC_IW_HTE].cnt =
+                                       qpwanted * hmc_fpm_misc->ht_multiplier;
+               hmc_info->hmc_obj[I40IW_HMC_IW_ARP].cnt =
+                       hmc_info->hmc_obj[I40IW_HMC_IW_ARP].max_cnt;
+               hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].cnt = 1;
+               hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt = mrwanted;
+
+               hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt = I40IW_MAX_WQ_ENTRIES * qpwanted;
+               hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt = 4 * I40IW_MAX_IRD_SIZE * qpwanted;
+               hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].cnt =
+                       hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt / hmc_fpm_misc->xf_block_size;
+               hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].cnt =
+                       hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt / hmc_fpm_misc->q1_block_size;
+               hmc_info->hmc_obj[I40IW_HMC_IW_TIMER].cnt =
+                       ((qpwanted) / 512 + 1) * hmc_fpm_misc->timer_bucket;
+               hmc_info->hmc_obj[I40IW_HMC_IW_FSIMC].cnt = 0x00;
+               hmc_info->hmc_obj[I40IW_HMC_IW_FSIAV].cnt = 0x00;
+               hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt = pblewanted;
+
+               /* How much memory is needed for all the objects. */
+               bytes_needed = 0;
+               for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++)
+                       bytes_needed +=
+                           (hmc_info->hmc_obj[i].cnt) * (hmc_info->hmc_obj[i].size);
+               sd_needed = (bytes_needed / I40IW_HMC_DIRECT_BP_SIZE) + 1;
+               if ((loop_count > 1000) ||
+                   ((!(loop_count % 10)) &&
+                   (qpwanted > qpwantedoriginal * 2 / 3))) {
+                       if (qpwanted > FPM_MULTIPLIER) {
+                               qpwanted -= FPM_MULTIPLIER;
+                               powerof2 = 1;
+                               while (powerof2 < qpwanted)
+                                       powerof2 *= 2;
+                               powerof2 /= 2;
+                               qpwanted = powerof2;
+                       } else {
+                               qpwanted /= 2;
+                       }
+               }
+               if (mrwanted > FPM_MULTIPLIER * 10)
+                       mrwanted -= FPM_MULTIPLIER * 10;
+               if (pblewanted > FPM_MULTIPLIER * 1000)
+                       pblewanted -= FPM_MULTIPLIER * 1000;
+       } while (sd_needed > hmc_fpm_misc->max_sds && loop_count < 2000);
+
+       bytes_needed = 0;
+       for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++) {
+               bytes_needed += (hmc_info->hmc_obj[i].cnt) * (hmc_info->hmc_obj[i].size);
+               i40iw_debug(dev, I40IW_DEBUG_HMC,
+                           "%s i[%04d] cnt[0x%04x] size[0x%04llx]\n",
+                           __func__, i, hmc_info->hmc_obj[i].cnt,
+                           hmc_info->hmc_obj[i].size);
+       }
+       sd_needed = (bytes_needed / I40IW_HMC_DIRECT_BP_SIZE) + 1;    /* round up not truncate. */
+
+       i40iw_debug(dev, I40IW_DEBUG_HMC,
+                   "loop_cnt=%d, sd_needed=%lld, qpcnt = %d, cqcnt=%d, mrcnt=%d, pblecnt=%d\n",
+                   loop_count, sd_needed,
+                   hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt,
+                   hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt,
+                   hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt,
+                   hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt);
+
+       ret_code = i40iw_sc_configure_iw_fpm(dev, dev->hmc_fn_id);
+       if (ret_code) {
+               i40iw_debug(dev, I40IW_DEBUG_HMC,
+                           "configure_iw_fpm returned error_code[x%08X]\n",
+                           i40iw_rd32(dev->hw, dev->is_pf ? I40E_PFPE_CQPERRCODES : I40E_VFPE_CQPERRCODES1));
+               return ret_code;
+       }
+
+       hmc_info->sd_table.sd_cnt = (u32)sd_needed;
+
+       mem_size = sizeof(struct i40iw_hmc_sd_entry) *
+                  (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index + 1);
+       ret_code = i40iw_allocate_virt_mem(dev->hw, &virt_mem, mem_size);
+       if (ret_code) {
+               i40iw_debug(dev, I40IW_DEBUG_HMC,
+                           "%s: failed to allocate memory for sd_entry buffer\n",
+                           __func__);
+               return ret_code;
+       }
+       hmc_info->sd_table.sd_entry = virt_mem.va;
+
+       return ret_code;
+}
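
The sizing loop above keeps shrinking the wanted object counts until the memory they need fits in the available segment descriptors, where sd_needed is the per-object cnt * size total rounded up by one direct backing-page size. The sketch below reproduces just that arithmetic; the object counts and the 2MB backing-page size are assumptions for the example, not values queried from the hardware.

#include <stdint.h>
#include <stdio.h>

#define DEMO_DIRECT_BP_SIZE	(2 * 1024 * 1024ULL)	/* assumed 2MB direct backing page */

int main(void)
{
	/* { count, size-in-bytes } pairs standing in for hmc_obj[] entries */
	uint64_t obj[][2] = {
		{ 1024, 512 },		/* e.g. QPs */
		{ 2048, 256 },		/* e.g. CQs */
		{ 65536, 8 },		/* e.g. PBLEs */
	};
	uint64_t bytes_needed = 0, sd_needed;
	unsigned int i;

	for (i = 0; i < sizeof(obj) / sizeof(obj[0]); i++)
		bytes_needed += obj[i][0] * obj[i][1];

	sd_needed = bytes_needed / DEMO_DIRECT_BP_SIZE + 1;	/* round up */
	printf("bytes_needed=%llu sd_needed=%llu\n",
	       (unsigned long long)bytes_needed, (unsigned long long)sd_needed);
	return 0;
}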
+
+/**
+ * i40iw_exec_cqp_cmd - execute cqp cmd when wqe are available
+ * @dev: rdma device
+ * @pcmdinfo: cqp command info
+ */
+static enum i40iw_status_code i40iw_exec_cqp_cmd(struct i40iw_sc_dev *dev,
+                                                struct cqp_commands_info *pcmdinfo)
+{
+       enum i40iw_status_code status;
+       struct i40iw_dma_mem values_mem;
+
+       dev->cqp_cmd_stats[pcmdinfo->cqp_cmd]++;
+       switch (pcmdinfo->cqp_cmd) {
+       case OP_DELETE_LOCAL_MAC_IPADDR_ENTRY:
+               status = i40iw_sc_del_local_mac_ipaddr_entry(
+                               pcmdinfo->in.u.del_local_mac_ipaddr_entry.cqp,
+                               pcmdinfo->in.u.del_local_mac_ipaddr_entry.scratch,
+                               pcmdinfo->in.u.del_local_mac_ipaddr_entry.entry_idx,
+                               pcmdinfo->in.u.del_local_mac_ipaddr_entry.ignore_ref_count,
+                               pcmdinfo->post_sq);
+               break;
+       case OP_CEQ_DESTROY:
+               status = i40iw_sc_ceq_destroy(pcmdinfo->in.u.ceq_destroy.ceq,
+                                             pcmdinfo->in.u.ceq_destroy.scratch,
+                                             pcmdinfo->post_sq);
+               break;
+       case OP_AEQ_DESTROY:
+               status = i40iw_sc_aeq_destroy(pcmdinfo->in.u.aeq_destroy.aeq,
+                                             pcmdinfo->in.u.aeq_destroy.scratch,
+                                             pcmdinfo->post_sq);
+
+               break;
+       case OP_DELETE_ARP_CACHE_ENTRY:
+               status = i40iw_sc_del_arp_cache_entry(
+                               pcmdinfo->in.u.del_arp_cache_entry.cqp,
+                               pcmdinfo->in.u.del_arp_cache_entry.scratch,
+                               pcmdinfo->in.u.del_arp_cache_entry.arp_index,
+                               pcmdinfo->post_sq);
+               break;
+       case OP_MANAGE_APBVT_ENTRY:
+               status = i40iw_sc_manage_apbvt_entry(
+                               pcmdinfo->in.u.manage_apbvt_entry.cqp,
+                               &pcmdinfo->in.u.manage_apbvt_entry.info,
+                               pcmdinfo->in.u.manage_apbvt_entry.scratch,
+                               pcmdinfo->post_sq);
+               break;
+       case OP_CEQ_CREATE:
+               status = i40iw_sc_ceq_create(pcmdinfo->in.u.ceq_create.ceq,
+                                            pcmdinfo->in.u.ceq_create.scratch,
+                                            pcmdinfo->post_sq);
+               break;
+       case OP_AEQ_CREATE:
+               status = i40iw_sc_aeq_create(pcmdinfo->in.u.aeq_create.aeq,
+                                            pcmdinfo->in.u.aeq_create.scratch,
+                                            pcmdinfo->post_sq);
+               break;
+       case OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY:
+               status = i40iw_sc_alloc_local_mac_ipaddr_entry(
+                               pcmdinfo->in.u.alloc_local_mac_ipaddr_entry.cqp,
+                               pcmdinfo->in.u.alloc_local_mac_ipaddr_entry.scratch,
+                               pcmdinfo->post_sq);
+               break;
+       case OP_ADD_LOCAL_MAC_IPADDR_ENTRY:
+               status = i40iw_sc_add_local_mac_ipaddr_entry(
+                               pcmdinfo->in.u.add_local_mac_ipaddr_entry.cqp,
+                               &pcmdinfo->in.u.add_local_mac_ipaddr_entry.info,
+                               pcmdinfo->in.u.add_local_mac_ipaddr_entry.scratch,
+                               pcmdinfo->post_sq);
+               break;
+       case OP_MANAGE_QHASH_TABLE_ENTRY:
+               status = i40iw_sc_manage_qhash_table_entry(
+                               pcmdinfo->in.u.manage_qhash_table_entry.cqp,
+                               &pcmdinfo->in.u.manage_qhash_table_entry.info,
+                               pcmdinfo->in.u.manage_qhash_table_entry.scratch,
+                               pcmdinfo->post_sq);
+
+               break;
+       case OP_QP_MODIFY:
+               status = i40iw_sc_qp_modify(
+                               pcmdinfo->in.u.qp_modify.qp,
+                               &pcmdinfo->in.u.qp_modify.info,
+                               pcmdinfo->in.u.qp_modify.scratch,
+                               pcmdinfo->post_sq);
+
+               break;
+       case OP_QP_UPLOAD_CONTEXT:
+               status = i40iw_sc_qp_upload_context(
+                               pcmdinfo->in.u.qp_upload_context.dev,
+                               &pcmdinfo->in.u.qp_upload_context.info,
+                               pcmdinfo->in.u.qp_upload_context.scratch,
+                               pcmdinfo->post_sq);
+
+               break;
+       case OP_CQ_CREATE:
+               status = i40iw_sc_cq_create(
+                               pcmdinfo->in.u.cq_create.cq,
+                               pcmdinfo->in.u.cq_create.scratch,
+                               pcmdinfo->in.u.cq_create.check_overflow,
+                               pcmdinfo->post_sq);
+               break;
+       case OP_CQ_DESTROY:
+               status = i40iw_sc_cq_destroy(
+                               pcmdinfo->in.u.cq_destroy.cq,
+                               pcmdinfo->in.u.cq_destroy.scratch,
+                               pcmdinfo->post_sq);
+
+               break;
+       case OP_QP_CREATE:
+               status = i40iw_sc_qp_create(
+                               pcmdinfo->in.u.qp_create.qp,
+                               &pcmdinfo->in.u.qp_create.info,
+                               pcmdinfo->in.u.qp_create.scratch,
+                               pcmdinfo->post_sq);
+               break;
+       case OP_QP_DESTROY:
+               status = i40iw_sc_qp_destroy(
+                               pcmdinfo->in.u.qp_destroy.qp,
+                               pcmdinfo->in.u.qp_destroy.scratch,
+                               pcmdinfo->in.u.qp_destroy.remove_hash_idx,
+                               pcmdinfo->in.u.qp_destroy.ignore_mw_bnd,
+                               pcmdinfo->post_sq);
+
+               break;
+       case OP_ALLOC_STAG:
+               status = i40iw_sc_alloc_stag(
+                               pcmdinfo->in.u.alloc_stag.dev,
+                               &pcmdinfo->in.u.alloc_stag.info,
+                               pcmdinfo->in.u.alloc_stag.scratch,
+                               pcmdinfo->post_sq);
+               break;
+       case OP_MR_REG_NON_SHARED:
+               status = i40iw_sc_mr_reg_non_shared(
+                               pcmdinfo->in.u.mr_reg_non_shared.dev,
+                               &pcmdinfo->in.u.mr_reg_non_shared.info,
+                               pcmdinfo->in.u.mr_reg_non_shared.scratch,
+                               pcmdinfo->post_sq);
+
+               break;
+       case OP_DEALLOC_STAG:
+               status = i40iw_sc_dealloc_stag(
+                               pcmdinfo->in.u.dealloc_stag.dev,
+                               &pcmdinfo->in.u.dealloc_stag.info,
+                               pcmdinfo->in.u.dealloc_stag.scratch,
+                               pcmdinfo->post_sq);
+
+               break;
+       case OP_MW_ALLOC:
+               status = i40iw_sc_mw_alloc(
+                               pcmdinfo->in.u.mw_alloc.dev,
+                               pcmdinfo->in.u.mw_alloc.scratch,
+                               pcmdinfo->in.u.mw_alloc.mw_stag_index,
+                               pcmdinfo->in.u.mw_alloc.pd_id,
+                               pcmdinfo->post_sq);
+
+               break;
+       case OP_QP_FLUSH_WQES:
+               status = i40iw_sc_qp_flush_wqes(
+                               pcmdinfo->in.u.qp_flush_wqes.qp,
+                               &pcmdinfo->in.u.qp_flush_wqes.info,
+                               pcmdinfo->in.u.qp_flush_wqes.scratch,
+                               pcmdinfo->post_sq);
+               break;
+       case OP_ADD_ARP_CACHE_ENTRY:
+               status = i40iw_sc_add_arp_cache_entry(
+                               pcmdinfo->in.u.add_arp_cache_entry.cqp,
+                               &pcmdinfo->in.u.add_arp_cache_entry.info,
+                               pcmdinfo->in.u.add_arp_cache_entry.scratch,
+                               pcmdinfo->post_sq);
+               break;
+       case OP_MANAGE_PUSH_PAGE:
+               status = i40iw_sc_manage_push_page(
+                               pcmdinfo->in.u.manage_push_page.cqp,
+                               &pcmdinfo->in.u.manage_push_page.info,
+                               pcmdinfo->in.u.manage_push_page.scratch,
+                               pcmdinfo->post_sq);
+               break;
+       case OP_UPDATE_PE_SDS:
+               /* case I40IW_CQP_OP_UPDATE_PE_SDS */
+               status = i40iw_update_pe_sds(
+                               pcmdinfo->in.u.update_pe_sds.dev,
+                               &pcmdinfo->in.u.update_pe_sds.info,
+                               pcmdinfo->in.u.update_pe_sds.scratch);
+
+               break;
+       case OP_MANAGE_HMC_PM_FUNC_TABLE:
+               status = i40iw_sc_manage_hmc_pm_func_table(
+                               pcmdinfo->in.u.manage_hmc_pm.dev->cqp,
+                               pcmdinfo->in.u.manage_hmc_pm.scratch,
+                               (u8)pcmdinfo->in.u.manage_hmc_pm.info.vf_id,
+                               pcmdinfo->in.u.manage_hmc_pm.info.free_fcn,
+                               true);
+               break;
+       case OP_SUSPEND:
+               status = i40iw_sc_suspend_qp(
+                               pcmdinfo->in.u.suspend_resume.cqp,
+                               pcmdinfo->in.u.suspend_resume.qp,
+                               pcmdinfo->in.u.suspend_resume.scratch);
+               break;
+       case OP_RESUME:
+               status = i40iw_sc_resume_qp(
+                               pcmdinfo->in.u.suspend_resume.cqp,
+                               pcmdinfo->in.u.suspend_resume.qp,
+                               pcmdinfo->in.u.suspend_resume.scratch);
+               break;
+       case OP_MANAGE_VF_PBLE_BP:
+               status = i40iw_manage_vf_pble_bp(
+                               pcmdinfo->in.u.manage_vf_pble_bp.cqp,
+                               &pcmdinfo->in.u.manage_vf_pble_bp.info,
+                               pcmdinfo->in.u.manage_vf_pble_bp.scratch, true);
+               break;
+       case OP_QUERY_FPM_VALUES:
+               values_mem.pa = pcmdinfo->in.u.query_fpm_values.fpm_values_pa;
+               values_mem.va = pcmdinfo->in.u.query_fpm_values.fpm_values_va;
+               status = i40iw_sc_query_fpm_values(
+                               pcmdinfo->in.u.query_fpm_values.cqp,
+                               pcmdinfo->in.u.query_fpm_values.scratch,
+                               pcmdinfo->in.u.query_fpm_values.hmc_fn_id,
+                               &values_mem, true, I40IW_CQP_WAIT_EVENT);
+               break;
+       case OP_COMMIT_FPM_VALUES:
+               values_mem.pa = pcmdinfo->in.u.commit_fpm_values.fpm_values_pa;
+               values_mem.va = pcmdinfo->in.u.commit_fpm_values.fpm_values_va;
+               status = i40iw_sc_commit_fpm_values(
+                               pcmdinfo->in.u.commit_fpm_values.cqp,
+                               pcmdinfo->in.u.commit_fpm_values.scratch,
+                               pcmdinfo->in.u.commit_fpm_values.hmc_fn_id,
+                               &values_mem,
+                               true,
+                               I40IW_CQP_WAIT_EVENT);
+               break;
+       default:
+               status = I40IW_NOT_SUPPORTED;
+               break;
+       }
+
+       return status;
+}
+
+/**
+ * i40iw_process_cqp_cmd - process all cqp commands
+ * @dev: sc device struct
+ * @pcmdinfo: cqp command info
+ */
+enum i40iw_status_code i40iw_process_cqp_cmd(struct i40iw_sc_dev *dev,
+                                            struct cqp_commands_info *pcmdinfo)
+{
+       enum i40iw_status_code status = 0;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&dev->cqp_lock, flags);
+       if (list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp))
+               status = i40iw_exec_cqp_cmd(dev, pcmdinfo);
+       else
+               list_add_tail(&pcmdinfo->cqp_cmd_entry, &dev->cqp_cmd_head);
+       spin_unlock_irqrestore(&dev->cqp_lock, flags);
+       return status;
+}
+
+/**
+ * i40iw_process_bh - called from tasklet for cqp list
+ * @dev: sc device struct
+ */
+enum i40iw_status_code i40iw_process_bh(struct i40iw_sc_dev *dev)
+{
+       enum i40iw_status_code status = 0;
+       struct cqp_commands_info *pcmdinfo;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&dev->cqp_lock, flags);
+       while (!list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp)) {
+               pcmdinfo = (struct cqp_commands_info *)i40iw_remove_head(&dev->cqp_cmd_head);
+
+               status = i40iw_exec_cqp_cmd(dev, pcmdinfo);
+               if (status)
+                       break;
+       }
+       spin_unlock_irqrestore(&dev->cqp_lock, flags);
+       return status;
+}
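As a hedged illustration of how these two entry points cooperate (the helper below is hypothetical and not part of this patch): a caller fills a cqp_commands_info and hands it to i40iw_process_cqp_cmd(), which either executes it immediately or queues it on cqp_cmd_head; the CQP completion tasklet later calls i40iw_process_bh() to replay queued commands once ring space is available.

	/* Illustrative sketch only -- hypothetical caller, not part of this patch. */
	static enum i40iw_status_code example_suspend_qp(struct i40iw_sc_dev *dev,
							 struct i40iw_sc_qp *qp)
	{
		struct cqp_commands_info info = {};

		info.cqp_cmd = OP_SUSPEND;
		info.post_sq = true;
		info.in.u.suspend_resume.cqp = dev->cqp;
		info.in.u.suspend_resume.qp = qp;
		info.in.u.suspend_resume.scratch = (uintptr_t)qp;

		/* The command may be queued rather than executed here, so a real
		 * caller must keep the command info alive (e.g. embedded in a
		 * longer-lived request structure) until i40iw_process_bh() drains it.
		 */
		return i40iw_process_cqp_cmd(dev, &info);
	}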
+
+/**
+ * i40iw_iwarp_opcode - extract the iwarp opcode from the offending packet
+ * @info: aeq info for the packet
+ * @pkt: packet for error
+ */
+static u32 i40iw_iwarp_opcode(struct i40iw_aeqe_info *info, u8 *pkt)
+{
+       u16 *mpa;
+       u32 opcode = 0xffffffff;
+
+       if (info->q2_data_written) {
+               mpa = (u16 *)pkt;
+               opcode = ntohs(mpa[1]) & 0xf;
+       }
+       return opcode;
+}
+
+/**
+ * i40iw_locate_mpa - return pointer to mpa in the pkt
+ * @pkt: packet with data
+ */
+static u8 *i40iw_locate_mpa(u8 *pkt)
+{
+       /* skip over ethernet header */
+       pkt += I40IW_MAC_HLEN;
+
+       /* Skip over IP and TCP headers */
+       pkt += 4 * (pkt[0] & 0x0f);
+       pkt += 4 * ((pkt[12] >> 4) & 0x0f);
+       return pkt;
+}
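Editorial note, not part of the patch: the two strides above come straight from the frame. After the 14-byte Ethernet header, the low nibble of the first IP byte is the IHL and the high nibble of byte 12 of the TCP header is the data offset, both counted in 4-byte words. For a frame with no IP or TCP options (an assumed example), pkt[0] is 0x45 and the TCP offset nibble is 5, so the MPA header begins 14 + 20 + 20 = 54 bytes into the frame.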
+
+/**
+ * i40iw_setup_termhdr - termhdr for terminate pkt
+ * @qp: sc qp ptr for pkt
+ * @hdr: term hdr
+ * @opcode: flush opcode for termhdr
+ * @layer_etype: error layer + error type
+ * @err: error code in the header
+ */
+static void i40iw_setup_termhdr(struct i40iw_sc_qp *qp,
+                               struct i40iw_terminate_hdr *hdr,
+                               enum i40iw_flush_opcode opcode,
+                               u8 layer_etype,
+                               u8 err)
+{
+       qp->flush_code = opcode;
+       hdr->layer_etype = layer_etype;
+       hdr->error_code = err;
+}
+
+/**
+ * i40iw_bld_terminate_hdr - build terminate message header
+ * @qp: qp associated with received terminate AE
+ * @info: the struct containing AE information
+ */
+static int i40iw_bld_terminate_hdr(struct i40iw_sc_qp *qp,
+                                  struct i40iw_aeqe_info *info)
+{
+       u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
+       u16 ddp_seg_len;
+       int copy_len = 0;
+       u8 is_tagged = 0;
+       enum i40iw_flush_opcode flush_code = FLUSH_INVALID;
+       u32 opcode;
+       struct i40iw_terminate_hdr *termhdr;
+
+       termhdr = (struct i40iw_terminate_hdr *)qp->q2_buf;
+       memset(termhdr, 0, Q2_BAD_FRAME_OFFSET);
+
+       if (info->q2_data_written) {
+               /* Use data from offending packet to fill in ddp & rdma hdrs */
+               pkt = i40iw_locate_mpa(pkt);
+               ddp_seg_len = ntohs(*(u16 *)pkt);
+               if (ddp_seg_len) {
+                       copy_len = 2;
+                       termhdr->hdrct = DDP_LEN_FLAG;
+                       if (pkt[2] & 0x80) {
+                               is_tagged = 1;
+                               if (ddp_seg_len >= TERM_DDP_LEN_TAGGED) {
+                                       copy_len += TERM_DDP_LEN_TAGGED;
+                                       termhdr->hdrct |= DDP_HDR_FLAG;
+                               }
+                       } else {
+                               if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) {
+                                       copy_len += TERM_DDP_LEN_UNTAGGED;
+                                       termhdr->hdrct |= DDP_HDR_FLAG;
+                               }
+
+                               if (ddp_seg_len >= (TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN)) {
+                                       if ((pkt[3] & RDMA_OPCODE_MASK) == RDMA_READ_REQ_OPCODE) {
+                                               copy_len += TERM_RDMA_LEN;
+                                               termhdr->hdrct |= RDMA_HDR_FLAG;
+                                       }
+                               }
+                       }
+               }
+       }
+
+       opcode = i40iw_iwarp_opcode(info, pkt);
+
+       switch (info->ae_id) {
+       case I40IW_AE_AMP_UNALLOCATED_STAG:
+               qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
+               if (opcode == I40IW_OP_TYPE_RDMA_WRITE)
+                       i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR,
+                                           (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_INV_STAG);
+               else
+                       i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+                                           (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_STAG);
+               break;
+       case I40IW_AE_AMP_BOUNDS_VIOLATION:
+               qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
+               if (info->q2_data_written)
+                       i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR,
+                                           (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_BOUNDS);
+               else
+                       i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+                                           (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_BOUNDS);
+               break;
+       case I40IW_AE_AMP_BAD_PD:
+               switch (opcode) {
+               case I40IW_OP_TYPE_RDMA_WRITE:
+                       i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR,
+                                           (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_UNASSOC_STAG);
+                       break;
+               case I40IW_OP_TYPE_SEND_INV:
+               case I40IW_OP_TYPE_SEND_SOL_INV:
+                       i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+                                           (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_CANT_INV_STAG);
+                       break;
+               default:
+                       i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+                                           (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_UNASSOC_STAG);
+               }
+               break;
+       case I40IW_AE_AMP_INVALID_STAG:
+               qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
+               i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+                                   (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_STAG);
+               break;
+       case I40IW_AE_AMP_BAD_QP:
+               i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_QP_OP_ERR,
+                                   (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_QN);
+               break;
+       case I40IW_AE_AMP_BAD_STAG_KEY:
+       case I40IW_AE_AMP_BAD_STAG_INDEX:
+               qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
+               switch (opcode) {
+               case I40IW_OP_TYPE_SEND_INV:
+               case I40IW_OP_TYPE_SEND_SOL_INV:
+                       i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_OP_ERR,
+                                           (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_CANT_INV_STAG);
+                       break;
+               default:
+                       i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+                                           (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_INV_STAG);
+               }
+               break;
+       case I40IW_AE_AMP_RIGHTS_VIOLATION:
+       case I40IW_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
+       case I40IW_AE_PRIV_OPERATION_DENIED:
+               qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
+               i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+                                   (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_ACCESS);
+               break;
+       case I40IW_AE_AMP_TO_WRAP:
+               qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
+               i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
+                                   (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_TO_WRAP);
+               break;
+       case I40IW_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH:
+               i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR,
+                                   (LAYER_MPA << 4) | DDP_LLP, MPA_MARKER);
+               break;
+       case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR:
+               i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
+                                   (LAYER_MPA << 4) | DDP_LLP, MPA_CRC);
+               break;
+       case I40IW_AE_LLP_SEGMENT_TOO_LARGE:
+       case I40IW_AE_LLP_SEGMENT_TOO_SMALL:
+               i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR,
+                                   (LAYER_DDP << 4) | DDP_CATASTROPHIC, DDP_CATASTROPHIC_LOCAL);
+               break;
+       case I40IW_AE_LCE_QP_CATASTROPHIC:
+       case I40IW_AE_DDP_NO_L_BIT:
+               i40iw_setup_termhdr(qp, termhdr, FLUSH_FATAL_ERR,
+                                   (LAYER_DDP << 4) | DDP_CATASTROPHIC, DDP_CATASTROPHIC_LOCAL);
+               break;
+       case I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN:
+       case I40IW_AE_DDP_INVALID_MSN_RANGE_IS_NOT_VALID:
+               i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
+                                   (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MSN_RANGE);
+               break;
+       case I40IW_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
+               qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
+               i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR,
+                                   (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_TOO_LONG);
+               break;
+       case I40IW_AE_DDP_UBE_INVALID_DDP_VERSION:
+               if (is_tagged)
+                       i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
+                                           (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_INV_DDP_VER);
+               else
+                       i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
+                                           (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_DDP_VER);
+               break;
+       case I40IW_AE_DDP_UBE_INVALID_MO:
+               i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
+                                   (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MO);
+               break;
+       case I40IW_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
+               i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_OP_ERR,
+                                   (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MSN_NO_BUF);
+               break;
+       case I40IW_AE_DDP_UBE_INVALID_QN:
+               i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
+                                   (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_QN);
+               break;
+       case I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
+               i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
+                                   (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_INV_RDMAP_VER);
+               break;
+       case I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
+               i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_QP_OP_ERR,
+                                   (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_UNEXPECTED_OP);
+               break;
+       default:
+               i40iw_setup_termhdr(qp, termhdr, FLUSH_FATAL_ERR,
+                                   (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_UNSPECIFIED);
+               break;
+       }
+
+       if (copy_len)
+               memcpy(termhdr + 1, pkt, copy_len);
+
+       if (flush_code && !info->in_rdrsp_wr)
+               qp->sq_flush = (info->sq) ? true : false;
+
+       return sizeof(struct i40iw_terminate_hdr) + copy_len;
+}
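As a worked example of the copy_len arithmetic above (values assumed): an untagged DDP segment long enough to carry an RDMA Read Request copies 2 bytes of DDP length, TERM_DDP_LEN_UNTAGGED (18) bytes of DDP header and TERM_RDMA_LEN (28) bytes of RDMAP header, so 48 bytes follow the terminate header and the function returns sizeof(struct i40iw_terminate_hdr) + 48.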
+
+/**
+ * i40iw_terminate_send_fin() - Send fin for terminate message
+ * @qp: qp associated with received terminate AE
+ */
+void i40iw_terminate_send_fin(struct i40iw_sc_qp *qp)
+{
+       /* Send the fin only */
+       i40iw_term_modify_qp(qp,
+                            I40IW_QP_STATE_TERMINATE,
+                            I40IWQP_TERM_SEND_FIN_ONLY,
+                            0);
+}
+
+/**
+ * i40iw_terminate_connection() - Bad AE and send terminate to remote QP
+ * @qp: qp associated with received terminate AE
+ * @info: the struct containing AE information
+ */
+void i40iw_terminate_connection(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info)
+{
+       u8 termlen = 0;
+
+       if (qp->term_flags & I40IW_TERM_SENT)
+               return;         /* Sanity check */
+
+       /* Eventtype can change from bld_terminate_hdr */
+       qp->eventtype = TERM_EVENT_QP_FATAL;
+       termlen = i40iw_bld_terminate_hdr(qp, info);
+       i40iw_terminate_start_timer(qp);
+       qp->term_flags |= I40IW_TERM_SENT;
+       i40iw_term_modify_qp(qp, I40IW_QP_STATE_TERMINATE,
+                            I40IWQP_TERM_SEND_TERM_ONLY, termlen);
+}
+
+/**
+ * i40iw_terminate_received - handle terminate received AE
+ * @qp: qp associated with received terminate AE
+ * @info: the struct containing AE information
+ */
+void i40iw_terminate_received(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info)
+{
+       u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
+       u32 *mpa;
+       u8 ddp_ctl;
+       u8 rdma_ctl;
+       u16 aeq_id = 0;
+       struct i40iw_terminate_hdr *termhdr;
+
+       mpa = (u32 *)i40iw_locate_mpa(pkt);
+       if (info->q2_data_written) {
+               /* did not validate the frame - do it now */
+               ddp_ctl = (ntohl(mpa[0]) >> 8) & 0xff;
+               rdma_ctl = ntohl(mpa[0]) & 0xff;
+               if ((ddp_ctl & 0xc0) != 0x40)
+                       aeq_id = I40IW_AE_LCE_QP_CATASTROPHIC;
+               else if ((ddp_ctl & 0x03) != 1)
+                       aeq_id = I40IW_AE_DDP_UBE_INVALID_DDP_VERSION;
+               else if (ntohl(mpa[2]) != 2)
+                       aeq_id = I40IW_AE_DDP_UBE_INVALID_QN;
+               else if (ntohl(mpa[3]) != 1)
+                       aeq_id = I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN;
+               else if (ntohl(mpa[4]) != 0)
+                       aeq_id = I40IW_AE_DDP_UBE_INVALID_MO;
+               else if ((rdma_ctl & 0xc0) != 0x40)
+                       aeq_id = I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION;
+
+               info->ae_id = aeq_id;
+               if (info->ae_id) {
+                       /* Bad terminate recvd - send back a terminate */
+                       i40iw_terminate_connection(qp, info);
+                       return;
+               }
+       }
+
+       qp->term_flags |= I40IW_TERM_RCVD;
+       qp->eventtype = TERM_EVENT_QP_FATAL;
+       termhdr = (struct i40iw_terminate_hdr *)&mpa[5];
+       if (termhdr->layer_etype == RDMAP_REMOTE_PROT ||
+           termhdr->layer_etype == RDMAP_REMOTE_OP) {
+               i40iw_terminate_done(qp, 0);
+       } else {
+               i40iw_terminate_start_timer(qp);
+               i40iw_terminate_send_fin(qp);
+       }
+}
+
+/**
+ * i40iw_hw_stat_init - Initialize HW stats table
+ * @devstat: pestat struct
+ * @fcn_idx: PCI fn id
+ * @hw: PF i40iw_hw structure.
+ * @is_pf: Is it a PF?
+ *
+ * Populate the HW stat table with the register offset address for each
+ * stat and take an initial snapshot of the counters as the baseline for
+ * later delta reads.
+ */
+static void i40iw_hw_stat_init(struct i40iw_dev_pestat *devstat,
+                              u8 fcn_idx,
+                              struct i40iw_hw *hw, bool is_pf)
+{
+       u32 stat_reg_offset;
+       u32 stat_index;
+       struct i40iw_dev_hw_stat_offsets *stat_table =
+               &devstat->hw_stat_offsets;
+       struct i40iw_dev_hw_stats *last_rd_stats = &devstat->last_read_hw_stats;
+
+       devstat->hw = hw;
+
+       if (is_pf) {
+               stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
+                               I40E_GLPES_PFIP4RXDISCARD(fcn_idx);
+               stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
+                               I40E_GLPES_PFIP4RXTRUNC(fcn_idx);
+               stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
+                               I40E_GLPES_PFIP4TXNOROUTE(fcn_idx);
+               stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
+                               I40E_GLPES_PFIP6RXDISCARD(fcn_idx);
+               stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
+                               I40E_GLPES_PFIP6RXTRUNC(fcn_idx);
+               stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
+                               I40E_GLPES_PFIP6TXNOROUTE(fcn_idx);
+               stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
+                               I40E_GLPES_PFTCPRTXSEG(fcn_idx);
+               stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
+                               I40E_GLPES_PFTCPRXOPTERR(fcn_idx);
+               stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
+                               I40E_GLPES_PFTCPRXPROTOERR(fcn_idx);
+
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
+                               I40E_GLPES_PFIP4RXOCTSLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
+                               I40E_GLPES_PFIP4RXPKTSLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
+                               I40E_GLPES_PFIP4RXFRAGSLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
+                               I40E_GLPES_PFIP4RXMCPKTSLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
+                               I40E_GLPES_PFIP4TXOCTSLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
+                               I40E_GLPES_PFIP4TXPKTSLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
+                               I40E_GLPES_PFIP4TXFRAGSLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
+                               I40E_GLPES_PFIP4TXMCPKTSLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
+                               I40E_GLPES_PFIP6RXOCTSLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
+                               I40E_GLPES_PFIP6RXPKTSLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
+                               I40E_GLPES_PFIP6RXFRAGSLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
+                               I40E_GLPES_PFIP6RXMCPKTSLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
+                               I40E_GLPES_PFIP6TXOCTSLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
+                               I40E_GLPES_PFIP6TXPKTSLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
+                               I40E_GLPES_PFIP6TXFRAGSLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
+                               I40E_GLPES_PFTCPRXSEGSLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
+                               I40E_GLPES_PFTCPTXSEGLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
+                               I40E_GLPES_PFRDMARXRDSLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
+                               I40E_GLPES_PFRDMARXSNDSLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
+                               I40E_GLPES_PFRDMARXWRSLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
+                               I40E_GLPES_PFRDMATXRDSLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
+                               I40E_GLPES_PFRDMATXSNDSLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
+                               I40E_GLPES_PFRDMATXWRSLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
+                               I40E_GLPES_PFRDMAVBNDLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
+                               I40E_GLPES_PFRDMAVINVLO(fcn_idx);
+       } else {
+               stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
+                               I40E_GLPES_VFIP4RXDISCARD(fcn_idx);
+               stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
+                               I40E_GLPES_VFIP4RXTRUNC(fcn_idx);
+               stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
+                               I40E_GLPES_VFIP4TXNOROUTE(fcn_idx);
+               stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
+                               I40E_GLPES_VFIP6RXDISCARD(fcn_idx);
+               stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
+                               I40E_GLPES_VFIP6RXTRUNC(fcn_idx);
+               stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
+                               I40E_GLPES_VFIP6TXNOROUTE(fcn_idx);
+               stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
+                               I40E_GLPES_VFTCPRTXSEG(fcn_idx);
+               stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
+                               I40E_GLPES_VFTCPRXOPTERR(fcn_idx);
+               stat_table->stat_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
+                               I40E_GLPES_VFTCPRXPROTOERR(fcn_idx);
+
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
+                               I40E_GLPES_VFIP4RXOCTSLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
+                               I40E_GLPES_VFIP4RXPKTSLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
+                               I40E_GLPES_VFIP4RXFRAGSLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
+                               I40E_GLPES_VFIP4RXMCPKTSLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
+                               I40E_GLPES_VFIP4TXOCTSLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
+                               I40E_GLPES_VFIP4TXPKTSLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
+                               I40E_GLPES_VFIP4TXFRAGSLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
+                               I40E_GLPES_VFIP4TXMCPKTSLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
+                               I40E_GLPES_VFIP6RXOCTSLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
+                               I40E_GLPES_VFIP6RXPKTSLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
+                               I40E_GLPES_VFIP6RXFRAGSLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
+                               I40E_GLPES_VFIP6RXMCPKTSLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
+                               I40E_GLPES_VFIP6TXOCTSLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
+                               I40E_GLPES_VFIP6TXPKTSLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
+                               I40E_GLPES_VFIP6TXFRAGSLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
+                               I40E_GLPES_VFTCPRXSEGSLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
+                               I40E_GLPES_VFTCPTXSEGLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
+                               I40E_GLPES_VFRDMARXRDSLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
+                               I40E_GLPES_VFRDMARXSNDSLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
+                               I40E_GLPES_VFRDMARXWRSLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
+                               I40E_GLPES_VFRDMATXRDSLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
+                               I40E_GLPES_VFRDMATXSNDSLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
+                               I40E_GLPES_VFRDMATXWRSLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
+                               I40E_GLPES_VFRDMAVBNDLO(fcn_idx);
+               stat_table->stat_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
+                               I40E_GLPES_VFRDMAVINVLO(fcn_idx);
+       }
+
+       for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_64;
+            stat_index++) {
+               stat_reg_offset = stat_table->stat_offset_64[stat_index];
+               last_rd_stats->stat_value_64[stat_index] =
+                       readq(devstat->hw->hw_addr + stat_reg_offset);
+       }
+
+       for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_32;
+            stat_index++) {
+               stat_reg_offset = stat_table->stat_offset_32[stat_index];
+               last_rd_stats->stat_value_32[stat_index] =
+                       i40iw_rd32(devstat->hw, stat_reg_offset);
+       }
+}
+
+/**
+ * i40iw_hw_stat_read_32 - Read a 32-bit HW stat counter, accounting for roll-over
+ * @devstat: pestat struct
+ * @index: index in HW stat table which contains offset reg-addr
+ * @value: hw stat value
+ */
+static void i40iw_hw_stat_read_32(struct i40iw_dev_pestat *devstat,
+                                 enum i40iw_hw_stat_index_32b index,
+                                 u64 *value)
+{
+       struct i40iw_dev_hw_stat_offsets *stat_table =
+               &devstat->hw_stat_offsets;
+       struct i40iw_dev_hw_stats *last_rd_stats = &devstat->last_read_hw_stats;
+       struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;
+       u64 new_stat_value = 0;
+       u32 stat_reg_offset = stat_table->stat_offset_32[index];
+
+       new_stat_value = i40iw_rd32(devstat->hw, stat_reg_offset);
+       /* roll-over case */
+       if (new_stat_value < last_rd_stats->stat_value_32[index])
+               hw_stats->stat_value_32[index] += new_stat_value;
+       else
+               hw_stats->stat_value_32[index] +=
+                       new_stat_value - last_rd_stats->stat_value_32[index];
+       last_rd_stats->stat_value_32[index] = new_stat_value;
+       *value = hw_stats->stat_value_32[index];
+}
+
+/**
+ * i40iw_hw_stat_read_64 - Read a 64-bit HW stat counter, accounting for roll-over
+ * @devstat: pestat struct
+ * @index: index in HW stat table which contains offset reg-addr
+ * @value: hw stat value
+ */
+static void i40iw_hw_stat_read_64(struct i40iw_dev_pestat *devstat,
+                                 enum i40iw_hw_stat_index_64b index,
+                                 u64 *value)
+{
+       struct i40iw_dev_hw_stat_offsets *stat_table =
+               &devstat->hw_stat_offsets;
+       struct i40iw_dev_hw_stats *last_rd_stats = &devstat->last_read_hw_stats;
+       struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;
+       u64 new_stat_value = 0;
+       u32 stat_reg_offset = stat_table->stat_offset_64[index];
+
+       new_stat_value = readq(devstat->hw->hw_addr + stat_reg_offset);
+       /* roll-over case */
+       if (new_stat_value < last_rd_stats->stat_value_64[index])
+               hw_stats->stat_value_64[index] += new_stat_value;
+       else
+               hw_stats->stat_value_64[index] +=
+                       new_stat_value - last_rd_stats->stat_value_64[index];
+       last_rd_stats->stat_value_64[index] = new_stat_value;
+       *value = hw_stats->stat_value_64[index];
+}
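Editorial note, not part of the patch: both readers accumulate deltas rather than raw register values. As a worked example (numbers assumed): if the previous 32-bit read was 0xFFFFFFF0 and the new read is 0x10, new_stat_value < last triggers the roll-over branch, which adds the new raw value (0x10) to the running total instead of a negative delta; the counts accumulated between the old value and the wrap point are dropped, keeping the accumulator monotonic at the cost of a small undercount per wrap.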
+
+/**
+ * i40iw_hw_stat_read_all - read all HW stat counters
+ * @devstat: pestat struct
+ * @stat_values: hw stats structure
+ *
+ * Read all the HW stat counters, updating the hw_stats structure of the
+ * passed-in pestat and copying the values into stat_values.
+ */
+static void i40iw_hw_stat_read_all(struct i40iw_dev_pestat *devstat,
+                                  struct i40iw_dev_hw_stats *stat_values)
+{
+       u32 stat_index;
+
+       for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_32;
+            stat_index++)
+               i40iw_hw_stat_read_32(devstat, stat_index,
+                                     &stat_values->stat_value_32[stat_index]);
+       for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_64;
+            stat_index++)
+               i40iw_hw_stat_read_64(devstat, stat_index,
+                                     &stat_values->stat_value_64[stat_index]);
+}
+
+/**
+ * i40iw_hw_stat_refresh_all - Update all HW stat structs
+ * @devstat: pestat struct
+ *
+ * Read all the HW stat counters to refresh the values in the hw_stats
+ * structure of the passed-in pestat.
+ */
+static void i40iw_hw_stat_refresh_all(struct i40iw_dev_pestat *devstat)
+{
+       u64 stat_value;
+       u32 stat_index;
+
+       for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_32;
+            stat_index++)
+               i40iw_hw_stat_read_32(devstat, stat_index, &stat_value);
+       for (stat_index = 0; stat_index < I40IW_HW_STAT_INDEX_MAX_64;
+            stat_index++)
+               i40iw_hw_stat_read_64(devstat, stat_index, &stat_value);
+}
+
+static struct i40iw_cqp_ops iw_cqp_ops = {
+       i40iw_sc_cqp_init,
+       i40iw_sc_cqp_create,
+       i40iw_sc_cqp_post_sq,
+       i40iw_sc_cqp_get_next_send_wqe,
+       i40iw_sc_cqp_destroy,
+       i40iw_sc_poll_for_cqp_op_done
+};
+
+static struct i40iw_ccq_ops iw_ccq_ops = {
+       i40iw_sc_ccq_init,
+       i40iw_sc_ccq_create,
+       i40iw_sc_ccq_destroy,
+       i40iw_sc_ccq_create_done,
+       i40iw_sc_ccq_get_cqe_info,
+       i40iw_sc_ccq_arm
+};
+
+static struct i40iw_ceq_ops iw_ceq_ops = {
+       i40iw_sc_ceq_init,
+       i40iw_sc_ceq_create,
+       i40iw_sc_cceq_create_done,
+       i40iw_sc_cceq_destroy_done,
+       i40iw_sc_cceq_create,
+       i40iw_sc_ceq_destroy,
+       i40iw_sc_process_ceq
+};
+
+static struct i40iw_aeq_ops iw_aeq_ops = {
+       i40iw_sc_aeq_init,
+       i40iw_sc_aeq_create,
+       i40iw_sc_aeq_destroy,
+       i40iw_sc_get_next_aeqe,
+       i40iw_sc_repost_aeq_entries,
+       i40iw_sc_aeq_create_done,
+       i40iw_sc_aeq_destroy_done
+};
+
+/* iwarp pd ops */
+static struct i40iw_pd_ops iw_pd_ops = {
+       i40iw_sc_pd_init,
+};
+
+static struct i40iw_priv_qp_ops iw_priv_qp_ops = {
+       i40iw_sc_qp_init,
+       i40iw_sc_qp_create,
+       i40iw_sc_qp_modify,
+       i40iw_sc_qp_destroy,
+       i40iw_sc_qp_flush_wqes,
+       i40iw_sc_qp_upload_context,
+       i40iw_sc_qp_setctx,
+       i40iw_sc_send_lsmm,
+       i40iw_sc_send_lsmm_nostag,
+       i40iw_sc_send_rtt,
+       i40iw_sc_post_wqe0,
+};
+
+static struct i40iw_priv_cq_ops iw_priv_cq_ops = {
+       i40iw_sc_cq_init,
+       i40iw_sc_cq_create,
+       i40iw_sc_cq_destroy,
+       i40iw_sc_cq_modify,
+};
+
+static struct i40iw_mr_ops iw_mr_ops = {
+       i40iw_sc_alloc_stag,
+       i40iw_sc_mr_reg_non_shared,
+       i40iw_sc_mr_reg_shared,
+       i40iw_sc_dealloc_stag,
+       i40iw_sc_query_stag,
+       i40iw_sc_mw_alloc
+};
+
+static struct i40iw_cqp_misc_ops iw_cqp_misc_ops = {
+       i40iw_sc_manage_push_page,
+       i40iw_sc_manage_hmc_pm_func_table,
+       i40iw_sc_set_hmc_resource_profile,
+       i40iw_sc_commit_fpm_values,
+       i40iw_sc_query_fpm_values,
+       i40iw_sc_static_hmc_pages_allocated,
+       i40iw_sc_add_arp_cache_entry,
+       i40iw_sc_del_arp_cache_entry,
+       i40iw_sc_query_arp_cache_entry,
+       i40iw_sc_manage_apbvt_entry,
+       i40iw_sc_manage_qhash_table_entry,
+       i40iw_sc_alloc_local_mac_ipaddr_entry,
+       i40iw_sc_add_local_mac_ipaddr_entry,
+       i40iw_sc_del_local_mac_ipaddr_entry,
+       i40iw_sc_cqp_nop,
+       i40iw_sc_commit_fpm_values_done,
+       i40iw_sc_query_fpm_values_done,
+       i40iw_sc_manage_hmc_pm_func_table_done,
+       i40iw_sc_suspend_qp,
+       i40iw_sc_resume_qp
+};
+
+static struct i40iw_hmc_ops iw_hmc_ops = {
+       i40iw_sc_init_iw_hmc,
+       i40iw_sc_parse_fpm_query_buf,
+       i40iw_sc_configure_iw_fpm,
+       i40iw_sc_parse_fpm_commit_buf,
+       i40iw_sc_create_hmc_obj,
+       i40iw_sc_del_hmc_obj,
+       NULL,
+       NULL
+};
+
+static const struct i40iw_device_pestat_ops iw_device_pestat_ops = {
+       i40iw_hw_stat_init,
+       i40iw_hw_stat_read_32,
+       i40iw_hw_stat_read_64,
+       i40iw_hw_stat_read_all,
+       i40iw_hw_stat_refresh_all
+};
+
+/**
+ * i40iw_device_init_pestat - Initialize the pestat structure
+ * @devstat: pestat struct
+ */
+enum i40iw_status_code i40iw_device_init_pestat(struct i40iw_dev_pestat *devstat)
+{
+       devstat->ops = iw_device_pestat_ops;
+       return 0;
+}
+
+/**
+ * i40iw_device_init - Initialize IWARP device
+ * @dev: IWARP device pointer
+ * @info: IWARP init info
+ */
+enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
+                                        struct i40iw_device_init_info *info)
+{
+       u32 val;
+       u32 vchnl_ver = 0;
+       u16 hmc_fcn = 0;
+       enum i40iw_status_code ret_code = 0;
+       u8 db_size;
+
+       spin_lock_init(&dev->cqp_lock);
+       INIT_LIST_HEAD(&dev->cqp_cmd_head);             /* for the cqp commands backlog. */
+
+       i40iw_device_init_uk(&dev->dev_uk);
+
+       dev->debug_mask = info->debug_mask;
+
+       ret_code = i40iw_device_init_pestat(&dev->dev_pestat);
+       if (ret_code) {
+               i40iw_debug(dev, I40IW_DEBUG_DEV,
+                           "%s: i40iw_device_init_pestat failed\n", __func__);
+               return ret_code;
+       }
+       dev->hmc_fn_id = info->hmc_fn_id;
+       dev->qs_handle = info->qs_handle;
+       dev->exception_lan_queue = info->exception_lan_queue;
+       dev->is_pf = info->is_pf;
+
+       dev->fpm_query_buf_pa = info->fpm_query_buf_pa;
+       dev->fpm_query_buf = info->fpm_query_buf;
+
+       dev->fpm_commit_buf_pa = info->fpm_commit_buf_pa;
+       dev->fpm_commit_buf = info->fpm_commit_buf;
+
+       dev->hw = info->hw;
+       dev->hw->hw_addr = info->bar0;
+
+       val = i40iw_rd32(dev->hw, I40E_GLPCI_DREVID);
+       dev->hw_rev = (u8)RS_32(val, I40E_GLPCI_DREVID_DEFAULT_REVID);
+
+       if (dev->is_pf) {
+               dev->dev_pestat.ops.iw_hw_stat_init(&dev->dev_pestat,
+                       dev->hmc_fn_id, dev->hw, true);
+               spin_lock_init(&dev->dev_pestat.stats_lock);
+               /* start the periodic stats_timer */
+               i40iw_hw_stats_start_timer(dev);
+               val = i40iw_rd32(dev->hw, I40E_GLPCI_LBARCTRL);
+               db_size = (u8)RS_32(val, I40E_GLPCI_LBARCTRL_PE_DB_SIZE);
+               if ((db_size != I40IW_PE_DB_SIZE_4M) &&
+                   (db_size != I40IW_PE_DB_SIZE_8M)) {
+                       i40iw_debug(dev, I40IW_DEBUG_DEV,
+                                   "%s: PE doorbell is not enabled in CSR val 0x%x\n",
+                                   __func__, val);
+                       ret_code = I40IW_ERR_PE_DOORBELL_NOT_ENABLED;
+                       return ret_code;
+               }
+               dev->db_addr = dev->hw->hw_addr + I40IW_DB_ADDR_OFFSET;
+               dev->vchnl_if.vchnl_recv = i40iw_vchnl_recv_pf;
+       } else {
+               dev->db_addr = dev->hw->hw_addr + I40IW_VF_DB_ADDR_OFFSET;
+       }
+
+       dev->cqp_ops = &iw_cqp_ops;
+       dev->ccq_ops = &iw_ccq_ops;
+       dev->ceq_ops = &iw_ceq_ops;
+       dev->aeq_ops = &iw_aeq_ops;
+       dev->cqp_misc_ops = &iw_cqp_misc_ops;
+       dev->iw_pd_ops = &iw_pd_ops;
+       dev->iw_priv_qp_ops = &iw_priv_qp_ops;
+       dev->iw_priv_cq_ops = &iw_priv_cq_ops;
+       dev->mr_ops = &iw_mr_ops;
+       dev->hmc_ops = &iw_hmc_ops;
+       dev->vchnl_if.vchnl_send = info->vchnl_send;
+       if (dev->vchnl_if.vchnl_send)
+               dev->vchnl_up = true;
+       else
+               dev->vchnl_up = false;
+       if (!dev->is_pf) {
+               dev->vchnl_if.vchnl_recv = i40iw_vchnl_recv_vf;
+               ret_code = i40iw_vchnl_vf_get_ver(dev, &vchnl_ver);
+               if (!ret_code) {
+                       i40iw_debug(dev, I40IW_DEBUG_DEV,
+                                   "%s: Get Channel version rc = 0x%0x, version is %u\n",
+                               __func__, ret_code, vchnl_ver);
+                       ret_code = i40iw_vchnl_vf_get_hmc_fcn(dev, &hmc_fcn);
+                       if (!ret_code) {
+                               i40iw_debug(dev, I40IW_DEBUG_DEV,
+                                           "%s Get HMC function rc = 0x%0x, hmc fcn is %u\n",
+                                           __func__, ret_code, hmc_fcn);
+                               dev->hmc_fn_id = (u8)hmc_fcn;
+                       }
+               }
+       }
+       dev->iw_vf_cqp_ops = &iw_vf_cqp_ops;
+
+       return ret_code;
+}
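A minimal sketch of how a caller might fill i40iw_device_init_info before invoking i40iw_device_init(); every value below is an assumption for illustration and is not taken from this patch.

	/* Illustrative only -- hypothetical PF-side initialization. */
	static enum i40iw_status_code example_init_dev(struct i40iw_sc_dev *dev,
						       struct i40iw_hw *hw,
						       u8 __iomem *bar0,
						       struct i40iw_dma_mem *query_buf,
						       struct i40iw_dma_mem *commit_buf)
	{
		struct i40iw_device_init_info info = {};

		info.is_pf = true;
		info.hmc_fn_id = 0;                     /* assumed PF HMC function id */
		info.qs_handle = 0;                     /* assumed QoS handle */
		info.exception_lan_queue = 1;           /* assumed LAN queue index */
		info.hw = hw;
		info.bar0 = bar0;
		info.fpm_query_buf_pa = query_buf->pa;
		info.fpm_query_buf = query_buf->va;
		info.fpm_commit_buf_pa = commit_buf->pa;
		info.fpm_commit_buf = commit_buf->va;
		info.debug_mask = I40IW_DEBUG_DEV;
		info.vchnl_send = NULL;                 /* assumed: no virtchnl transport on this path */

		return i40iw_device_init(dev, &info);
	}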
diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h
new file mode 100644 (file)
index 0000000..aab88d6
--- /dev/null
@@ -0,0 +1,1713 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses.  You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+*   Redistribution and use in source and binary forms, with or
+*   without modification, are permitted provided that the following
+*   conditions are met:
+*
+*    - Redistributions of source code must retain the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer.
+*
+*    - Redistributions in binary form must reproduce the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer in the documentation and/or other materials
+*      provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_D_H
+#define I40IW_D_H
+
+#define I40IW_DB_ADDR_OFFSET    (4 * 1024 * 1024 - 64 * 1024)
+#define I40IW_VF_DB_ADDR_OFFSET (64 * 1024)
+
+#define I40IW_PUSH_OFFSET       (4 * 1024 * 1024)
+#define I40IW_PF_FIRST_PUSH_PAGE_INDEX 16
+#define I40IW_VF_PUSH_OFFSET    ((8 + 64) * 1024)
+#define I40IW_VF_FIRST_PUSH_PAGE_INDEX 2
+
+#define I40IW_PE_DB_SIZE_4M     1
+#define I40IW_PE_DB_SIZE_8M     2
+
+#define I40IW_DDP_VER 1
+#define I40IW_RDMAP_VER 1
+
+#define I40IW_RDMA_MODE_RDMAC 0
+#define I40IW_RDMA_MODE_IETF  1
+
+#define I40IW_QP_STATE_INVALID 0
+#define I40IW_QP_STATE_IDLE 1
+#define I40IW_QP_STATE_RTS 2
+#define I40IW_QP_STATE_CLOSING 3
+#define I40IW_QP_STATE_RESERVED 4
+#define I40IW_QP_STATE_TERMINATE 5
+#define I40IW_QP_STATE_ERROR 6
+
+#define I40IW_STAG_STATE_INVALID 0
+#define I40IW_STAG_STATE_VALID 1
+
+#define I40IW_STAG_TYPE_SHARED 0
+#define I40IW_STAG_TYPE_NONSHARED 1
+
+#define I40IW_MAX_USER_PRIORITY 8
+
+#define LS_64_1(val, bits)      ((u64)(uintptr_t)val << bits)
+#define RS_64_1(val, bits)      ((u64)(uintptr_t)val >> bits)
+#define LS_32_1(val, bits)      (u32)(val << bits)
+#define RS_32_1(val, bits)      (u32)(val >> bits)
+#define I40E_HI_DWORD(x)        ((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF))
+
+#define LS_64(val, field) (((u64)val << field ## _SHIFT) & (field ## _MASK))
+
+#define RS_64(val, field) ((u64)(val & field ## _MASK) >> field ## _SHIFT)
+#define LS_32(val, field) ((val << field ## _SHIFT) & (field ## _MASK))
+#define RS_32(val, field) ((val & field ## _MASK) >> field ## _SHIFT)
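Editorial note, not part of the patch: these shift/mask helpers are how every descriptor field in the rest of this header is packed and unpacked. A small illustration using I40IW_CQPHC_SQSIZE (shift 8, 4-bit mask), defined further down in this file:

	/* Illustrative only: packing and unpacking one descriptor field. */
	u64 qword = LS_64(5, I40IW_CQPHC_SQSIZE);              /* -> 0x0000000000000500 */
	u32 sqsize = (u32)RS_64(qword, I40IW_CQPHC_SQSIZE);    /* -> 5 */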
+
+#define TERM_DDP_LEN_TAGGED     14
+#define TERM_DDP_LEN_UNTAGGED   18
+#define TERM_RDMA_LEN           28
+#define RDMA_OPCODE_MASK        0x0f
+#define RDMA_READ_REQ_OPCODE    1
+#define Q2_BAD_FRAME_OFFSET     72
+#define CQE_MAJOR_DRV           0x8000
+
+#define I40IW_TERM_SENT 0x01
+#define I40IW_TERM_RCVD 0x02
+#define I40IW_TERM_DONE 0x04
+#define I40IW_MAC_HLEN  14
+
+#define I40IW_INVALID_WQE_INDEX 0xffffffff
+
+#define I40IW_CQP_WAIT_POLL_REGS 1
+#define I40IW_CQP_WAIT_POLL_CQ 2
+#define I40IW_CQP_WAIT_EVENT 3
+
+#define I40IW_CQP_INIT_WQE(wqe) memset(wqe, 0, 64)
+
+#define I40IW_GET_CURRENT_CQ_ELEMENT(_cq) \
+       ( \
+               &((_cq)->cq_base[I40IW_RING_GETCURRENT_HEAD((_cq)->cq_ring)])  \
+       )
+#define I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(_cq) \
+       ( \
+               &(((struct i40iw_extended_cqe *)        \
+                  ((_cq)->cq_base))[I40IW_RING_GETCURRENT_HEAD((_cq)->cq_ring)]) \
+       )
+
+#define I40IW_GET_CURRENT_AEQ_ELEMENT(_aeq) \
+       ( \
+               &_aeq->aeqe_base[I40IW_RING_GETCURRENT_TAIL(_aeq->aeq_ring)]   \
+       )
+
+#define I40IW_GET_CURRENT_CEQ_ELEMENT(_ceq) \
+       ( \
+               &_ceq->ceqe_base[I40IW_RING_GETCURRENT_TAIL(_ceq->ceq_ring)]   \
+       )
+
+#define I40IW_AE_SOURCE_RQ              0x1
+#define I40IW_AE_SOURCE_RQ_0011         0x3
+
+#define I40IW_AE_SOURCE_CQ              0x2
+#define I40IW_AE_SOURCE_CQ_0110         0x6
+#define I40IW_AE_SOURCE_CQ_1010         0xA
+#define I40IW_AE_SOURCE_CQ_1110         0xE
+
+#define I40IW_AE_SOURCE_SQ              0x5
+#define I40IW_AE_SOURCE_SQ_0111         0x7
+
+#define I40IW_AE_SOURCE_IN_RR_WR        0x9
+#define I40IW_AE_SOURCE_IN_RR_WR_1011   0xB
+#define I40IW_AE_SOURCE_OUT_RR          0xD
+#define I40IW_AE_SOURCE_OUT_RR_1111     0xF
+
+#define I40IW_TCP_STATE_NON_EXISTENT 0
+#define I40IW_TCP_STATE_CLOSED 1
+#define I40IW_TCP_STATE_LISTEN 2
+#define I40IW_STATE_SYN_SEND 3
+#define I40IW_TCP_STATE_SYN_RECEIVED 4
+#define I40IW_TCP_STATE_ESTABLISHED 5
+#define I40IW_TCP_STATE_CLOSE_WAIT 6
+#define I40IW_TCP_STATE_FIN_WAIT_1 7
+#define I40IW_TCP_STATE_CLOSING  8
+#define I40IW_TCP_STATE_LAST_ACK 9
+#define I40IW_TCP_STATE_FIN_WAIT_2 10
+#define I40IW_TCP_STATE_TIME_WAIT 11
+#define I40IW_TCP_STATE_RESERVED_1 12
+#define I40IW_TCP_STATE_RESERVED_2 13
+#define I40IW_TCP_STATE_RESERVED_3 14
+#define I40IW_TCP_STATE_RESERVED_4 15
+
+/* ILQ CQP hash table fields */
+#define I40IW_CQPSQ_QHASH_VLANID_SHIFT 32
+#define I40IW_CQPSQ_QHASH_VLANID_MASK \
+       ((u64)0xfff << I40IW_CQPSQ_QHASH_VLANID_SHIFT)
+
+#define I40IW_CQPSQ_QHASH_QPN_SHIFT 32
+#define I40IW_CQPSQ_QHASH_QPN_MASK \
+       ((u64)0x3ffff << I40IW_CQPSQ_QHASH_QPN_SHIFT)
+
+#define I40IW_CQPSQ_QHASH_QS_HANDLE_SHIFT 0
+#define I40IW_CQPSQ_QHASH_QS_HANDLE_MASK ((u64)0x3ff << I40IW_CQPSQ_QHASH_QS_HANDLE_SHIFT)
+
+#define I40IW_CQPSQ_QHASH_SRC_PORT_SHIFT 16
+#define I40IW_CQPSQ_QHASH_SRC_PORT_MASK \
+       ((u64)0xffff << I40IW_CQPSQ_QHASH_SRC_PORT_SHIFT)
+
+#define I40IW_CQPSQ_QHASH_DEST_PORT_SHIFT 0
+#define I40IW_CQPSQ_QHASH_DEST_PORT_MASK \
+       ((u64)0xffff << I40IW_CQPSQ_QHASH_DEST_PORT_SHIFT)
+
+#define I40IW_CQPSQ_QHASH_ADDR0_SHIFT 32
+#define I40IW_CQPSQ_QHASH_ADDR0_MASK \
+       ((u64)0xffffffff << I40IW_CQPSQ_QHASH_ADDR0_SHIFT)
+
+#define I40IW_CQPSQ_QHASH_ADDR1_SHIFT 0
+#define I40IW_CQPSQ_QHASH_ADDR1_MASK \
+       ((u64)0xffffffff << I40IW_CQPSQ_QHASH_ADDR1_SHIFT)
+
+#define I40IW_CQPSQ_QHASH_ADDR2_SHIFT 32
+#define I40IW_CQPSQ_QHASH_ADDR2_MASK \
+       ((u64)0xffffffff << I40IW_CQPSQ_QHASH_ADDR2_SHIFT)
+
+#define I40IW_CQPSQ_QHASH_ADDR3_SHIFT 0
+#define I40IW_CQPSQ_QHASH_ADDR3_MASK \
+       ((u64)0xffffffff << I40IW_CQPSQ_QHASH_ADDR3_SHIFT)
+
+#define I40IW_CQPSQ_QHASH_WQEVALID_SHIFT 63
+#define I40IW_CQPSQ_QHASH_WQEVALID_MASK \
+       ((u64)0x1 << I40IW_CQPSQ_QHASH_WQEVALID_SHIFT)
+#define I40IW_CQPSQ_QHASH_OPCODE_SHIFT 32
+#define I40IW_CQPSQ_QHASH_OPCODE_MASK \
+       ((u64)0x3f << I40IW_CQPSQ_QHASH_OPCODE_SHIFT)
+
+#define I40IW_CQPSQ_QHASH_MANAGE_SHIFT 61
+#define I40IW_CQPSQ_QHASH_MANAGE_MASK \
+       ((u64)0x3 << I40IW_CQPSQ_QHASH_MANAGE_SHIFT)
+
+#define I40IW_CQPSQ_QHASH_IPV4VALID_SHIFT 60
+#define I40IW_CQPSQ_QHASH_IPV4VALID_MASK \
+       ((u64)0x1 << I40IW_CQPSQ_QHASH_IPV4VALID_SHIFT)
+
+#define I40IW_CQPSQ_QHASH_VLANVALID_SHIFT 59
+#define I40IW_CQPSQ_QHASH_VLANVALID_MASK \
+       ((u64)0x1 << I40IW_CQPSQ_QHASH_VLANVALID_SHIFT)
+
+#define I40IW_CQPSQ_QHASH_ENTRYTYPE_SHIFT 42
+#define I40IW_CQPSQ_QHASH_ENTRYTYPE_MASK \
+       ((u64)0x7 << I40IW_CQPSQ_QHASH_ENTRYTYPE_SHIFT)
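Every field in this header follows the same convention: a *_SHIFT constant gives the bit position of a field inside a 64-bit word and the matching *_MASK covers its width at that position. Below is a minimal sketch of how such a pair is typically consumed; the helper names are illustrative only (elsewhere in this series, i40iw_osdep.h appears to wrap the same pattern in LS_64()/RS_64() macros), and the VLAN-id field is just a convenient example.

#include <stdint.h>

/* Illustrative helpers; the driver's own accessors may differ. */
static inline uint64_t fld_prep(uint64_t val, uint64_t mask, unsigned int shift)
{
	return (val << shift) & mask;         /* place a value into its field */
}

static inline uint64_t fld_get(uint64_t qw, uint64_t mask, unsigned int shift)
{
	return (qw & mask) >> shift;          /* extract a right-aligned field */
}

static void qhash_example(void)
{
	uint64_t qw = 0;

	/* pack VLAN id 100 into a quad-hash WQE word ... */
	qw |= fld_prep(100, I40IW_CQPSQ_QHASH_VLANID_MASK,
		       I40IW_CQPSQ_QHASH_VLANID_SHIFT);
	/* ... and read it back (yields 100) */
	(void)fld_get(qw, I40IW_CQPSQ_QHASH_VLANID_MASK,
		      I40IW_CQPSQ_QHASH_VLANID_SHIFT);
}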
+/* CQP Host Context */
+#define I40IW_CQPHC_EN_DC_TCP_SHIFT 0
+#define I40IW_CQPHC_EN_DC_TCP_MASK (1UL << I40IW_CQPHC_EN_DC_TCP_SHIFT)
+
+#define I40IW_CQPHC_SQSIZE_SHIFT 8
+#define I40IW_CQPHC_SQSIZE_MASK (0xfUL << I40IW_CQPHC_SQSIZE_SHIFT)
+
+#define I40IW_CQPHC_DISABLE_PFPDUS_SHIFT 1
+#define I40IW_CQPHC_DISABLE_PFPDUS_MASK (0x1UL << I40IW_CQPHC_DISABLE_PFPDUS_SHIFT)
+
+#define I40IW_CQPHC_ENABLED_VFS_SHIFT 32
+#define I40IW_CQPHC_ENABLED_VFS_MASK (0x3fULL << I40IW_CQPHC_ENABLED_VFS_SHIFT)
+
+#define I40IW_CQPHC_HMC_PROFILE_SHIFT 0
+#define I40IW_CQPHC_HMC_PROFILE_MASK (0x7ULL << I40IW_CQPHC_HMC_PROFILE_SHIFT)
+
+#define I40IW_CQPHC_SVER_SHIFT 24
+#define I40IW_CQPHC_SVER_MASK (0xffUL << I40IW_CQPHC_SVER_SHIFT)
+
+#define I40IW_CQPHC_SQBASE_SHIFT 9
+#define I40IW_CQPHC_SQBASE_MASK \
+       (0xfffffffffffffeULL << I40IW_CQPHC_SQBASE_SHIFT)
+
+#define I40IW_CQPHC_QPCTX_SHIFT 0
+#define I40IW_CQPHC_QPCTX_MASK  \
+       (0xffffffffffffffffULL << I40IW_CQPHC_QPCTX_SHIFT)
+#define I40IW_CQPHC_SVER        1
+
+#define I40IW_CQP_SW_SQSIZE_4 4
+#define I40IW_CQP_SW_SQSIZE_2048 2048
+
+/* iWARP QP Doorbell shadow area */
+#define I40IW_QP_DBSA_HW_SQ_TAIL_SHIFT 0
+#define I40IW_QP_DBSA_HW_SQ_TAIL_MASK \
+       (0x3fffUL << I40IW_QP_DBSA_HW_SQ_TAIL_SHIFT)
+
+/* Completion Queue Doorbell shadow area */
+#define I40IW_CQ_DBSA_CQEIDX_SHIFT 0
+#define I40IW_CQ_DBSA_CQEIDX_MASK (0xfffffUL << I40IW_CQ_DBSA_CQEIDX_SHIFT)
+
+#define I40IW_CQ_DBSA_SW_CQ_SELECT_SHIFT 0
+#define I40IW_CQ_DBSA_SW_CQ_SELECT_MASK \
+       (0x3fffUL << I40IW_CQ_DBSA_SW_CQ_SELECT_SHIFT)
+
+#define I40IW_CQ_DBSA_ARM_NEXT_SHIFT 14
+#define I40IW_CQ_DBSA_ARM_NEXT_MASK (1UL << I40IW_CQ_DBSA_ARM_NEXT_SHIFT)
+
+#define I40IW_CQ_DBSA_ARM_NEXT_SE_SHIFT 15
+#define I40IW_CQ_DBSA_ARM_NEXT_SE_MASK (1UL << I40IW_CQ_DBSA_ARM_NEXT_SE_SHIFT)
+
+#define I40IW_CQ_DBSA_ARM_SEQ_NUM_SHIFT 16
+#define I40IW_CQ_DBSA_ARM_SEQ_NUM_MASK \
+       (0x3UL << I40IW_CQ_DBSA_ARM_SEQ_NUM_SHIFT)
+
+/* CQP and iWARP Completion Queue */
+#define I40IW_CQ_QPCTX_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IW_CQ_QPCTX_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IW_CCQ_OPRETVAL_SHIFT 0
+#define I40IW_CCQ_OPRETVAL_MASK (0xffffffffUL << I40IW_CCQ_OPRETVAL_SHIFT)
+
+#define I40IW_CQ_MINERR_SHIFT 0
+#define I40IW_CQ_MINERR_MASK (0xffffUL << I40IW_CQ_MINERR_SHIFT)
+
+#define I40IW_CQ_MAJERR_SHIFT 16
+#define I40IW_CQ_MAJERR_MASK (0xffffUL << I40IW_CQ_MAJERR_SHIFT)
+
+#define I40IW_CQ_WQEIDX_SHIFT 32
+#define I40IW_CQ_WQEIDX_MASK (0x3fffULL << I40IW_CQ_WQEIDX_SHIFT)
+
+#define I40IW_CQ_ERROR_SHIFT 55
+#define I40IW_CQ_ERROR_MASK (1ULL << I40IW_CQ_ERROR_SHIFT)
+
+#define I40IW_CQ_SQ_SHIFT 62
+#define I40IW_CQ_SQ_MASK (1ULL << I40IW_CQ_SQ_SHIFT)
+
+#define I40IW_CQ_VALID_SHIFT 63
+#define I40IW_CQ_VALID_MASK (1ULL << I40IW_CQ_VALID_SHIFT)
+
+#define I40IWCQ_PAYLDLEN_SHIFT 0
+#define I40IWCQ_PAYLDLEN_MASK (0xffffffffUL << I40IWCQ_PAYLDLEN_SHIFT)
+
+#define I40IWCQ_TCPSEQNUM_SHIFT 32
+#define I40IWCQ_TCPSEQNUM_MASK (0xffffffffULL << I40IWCQ_TCPSEQNUM_SHIFT)
+
+#define I40IWCQ_INVSTAG_SHIFT 0
+#define I40IWCQ_INVSTAG_MASK (0xffffffffUL << I40IWCQ_INVSTAG_SHIFT)
+
+#define I40IWCQ_QPID_SHIFT 32
+#define I40IWCQ_QPID_MASK (0x3ffffULL << I40IWCQ_QPID_SHIFT)
+
+#define I40IWCQ_PSHDROP_SHIFT 51
+#define I40IWCQ_PSHDROP_MASK (1ULL << I40IWCQ_PSHDROP_SHIFT)
+
+#define I40IWCQ_SRQ_SHIFT 52
+#define I40IWCQ_SRQ_MASK (1ULL << I40IWCQ_SRQ_SHIFT)
+
+#define I40IWCQ_STAG_SHIFT 53
+#define I40IWCQ_STAG_MASK (1ULL << I40IWCQ_STAG_SHIFT)
+
+#define I40IWCQ_SOEVENT_SHIFT 54
+#define I40IWCQ_SOEVENT_MASK (1ULL << I40IWCQ_SOEVENT_SHIFT)
+
+#define I40IWCQ_OP_SHIFT 56
+#define I40IWCQ_OP_MASK (0x3fULL << I40IWCQ_OP_SHIFT)
+
+/* CEQE format */
+#define I40IW_CEQE_CQCTX_SHIFT 0
+#define I40IW_CEQE_CQCTX_MASK   \
+       (0x7fffffffffffffffULL << I40IW_CEQE_CQCTX_SHIFT)
+
+#define I40IW_CEQE_VALID_SHIFT 63
+#define I40IW_CEQE_VALID_MASK (1ULL << I40IW_CEQE_VALID_SHIFT)
+
+/* AEQE format */
+#define I40IW_AEQE_COMPCTX_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IW_AEQE_COMPCTX_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IW_AEQE_QPCQID_SHIFT 0
+#define I40IW_AEQE_QPCQID_MASK (0x3ffffUL << I40IW_AEQE_QPCQID_SHIFT)
+
+#define I40IW_AEQE_WQDESCIDX_SHIFT 18
+#define I40IW_AEQE_WQDESCIDX_MASK (0x3fffULL << I40IW_AEQE_WQDESCIDX_SHIFT)
+
+#define I40IW_AEQE_OVERFLOW_SHIFT 33
+#define I40IW_AEQE_OVERFLOW_MASK (1ULL << I40IW_AEQE_OVERFLOW_SHIFT)
+
+#define I40IW_AEQE_AECODE_SHIFT 34
+#define I40IW_AEQE_AECODE_MASK (0xffffULL << I40IW_AEQE_AECODE_SHIFT)
+
+#define I40IW_AEQE_AESRC_SHIFT 50
+#define I40IW_AEQE_AESRC_MASK (0xfULL << I40IW_AEQE_AESRC_SHIFT)
+
+#define I40IW_AEQE_IWSTATE_SHIFT 54
+#define I40IW_AEQE_IWSTATE_MASK (0x7ULL << I40IW_AEQE_IWSTATE_SHIFT)
+
+#define I40IW_AEQE_TCPSTATE_SHIFT 57
+#define I40IW_AEQE_TCPSTATE_MASK (0xfULL << I40IW_AEQE_TCPSTATE_SHIFT)
+
+#define I40IW_AEQE_Q2DATA_SHIFT 61
+#define I40IW_AEQE_Q2DATA_MASK (0x3ULL << I40IW_AEQE_Q2DATA_SHIFT)
+
+#define I40IW_AEQE_VALID_SHIFT 63
+#define I40IW_AEQE_VALID_MASK (1ULL << I40IW_AEQE_VALID_SHIFT)
+
+/* CQP SQ WQES */
+#define I40IW_QP_TYPE_IWARP     1
+#define I40IW_QP_TYPE_UDA       2
+#define I40IW_QP_TYPE_CQP       4
+
+#define I40IW_CQ_TYPE_IWARP     1
+#define I40IW_CQ_TYPE_ILQ       2
+#define I40IW_CQ_TYPE_IEQ       3
+#define I40IW_CQ_TYPE_CQP       4
+
+#define I40IWQP_TERM_SEND_TERM_AND_FIN          0
+#define I40IWQP_TERM_SEND_TERM_ONLY             1
+#define I40IWQP_TERM_SEND_FIN_ONLY              2
+#define I40IWQP_TERM_DONOT_SEND_TERM_OR_FIN     3
+
+#define I40IW_CQP_OP_CREATE_QP                  0
+#define I40IW_CQP_OP_MODIFY_QP                  0x1
+#define I40IW_CQP_OP_DESTROY_QP                 0x02
+#define I40IW_CQP_OP_CREATE_CQ                  0x03
+#define I40IW_CQP_OP_MODIFY_CQ                  0x04
+#define I40IW_CQP_OP_DESTROY_CQ                 0x05
+#define I40IW_CQP_OP_CREATE_SRQ                 0x06
+#define I40IW_CQP_OP_MODIFY_SRQ                 0x07
+#define I40IW_CQP_OP_DESTROY_SRQ                0x08
+#define I40IW_CQP_OP_ALLOC_STAG                 0x09
+#define I40IW_CQP_OP_REG_MR                     0x0a
+#define I40IW_CQP_OP_QUERY_STAG                 0x0b
+#define I40IW_CQP_OP_REG_SMR                    0x0c
+#define I40IW_CQP_OP_DEALLOC_STAG               0x0d
+#define I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE    0x0e
+#define I40IW_CQP_OP_MANAGE_ARP                 0x0f
+#define I40IW_CQP_OP_MANAGE_VF_PBLE_BP          0x10
+#define I40IW_CQP_OP_MANAGE_PUSH_PAGES          0x11
+#define I40IW_CQP_OP_MANAGE_PE_TEAM             0x12
+#define I40IW_CQP_OP_UPLOAD_CONTEXT             0x13
+#define I40IW_CQP_OP_ALLOCATE_LOC_MAC_IP_TABLE_ENTRY 0x14
+#define I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE   0x15
+#define I40IW_CQP_OP_CREATE_CEQ                 0x16
+#define I40IW_CQP_OP_DESTROY_CEQ                0x18
+#define I40IW_CQP_OP_CREATE_AEQ                 0x19
+#define I40IW_CQP_OP_DESTROY_AEQ                0x1b
+#define I40IW_CQP_OP_CREATE_ADDR_VECT           0x1c
+#define I40IW_CQP_OP_MODIFY_ADDR_VECT           0x1d
+#define I40IW_CQP_OP_DESTROY_ADDR_VECT          0x1e
+#define I40IW_CQP_OP_UPDATE_PE_SDS              0x1f
+#define I40IW_CQP_OP_QUERY_FPM_VALUES           0x20
+#define I40IW_CQP_OP_COMMIT_FPM_VALUES          0x21
+#define I40IW_CQP_OP_FLUSH_WQES                 0x22
+#define I40IW_CQP_OP_MANAGE_APBVT               0x23
+#define I40IW_CQP_OP_NOP                        0x24
+#define I40IW_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY 0x25
+#define I40IW_CQP_OP_CREATE_UDA_MCAST_GROUP     0x26
+#define I40IW_CQP_OP_MODIFY_UDA_MCAST_GROUP     0x27
+#define I40IW_CQP_OP_DESTROY_UDA_MCAST_GROUP    0x28
+#define I40IW_CQP_OP_SUSPEND_QP                 0x29
+#define I40IW_CQP_OP_RESUME_QP                  0x2a
+#define I40IW_CQP_OP_SHMC_PAGES_ALLOCATED       0x2b
+#define I40IW_CQP_OP_SET_HMC_RESOURCE_PROFILE   0x2d
+
+#define I40IW_UDA_QPSQ_NEXT_HEADER_SHIFT 16
+#define I40IW_UDA_QPSQ_NEXT_HEADER_MASK ((u64)0xff << I40IW_UDA_QPSQ_NEXT_HEADER_SHIFT)
+
+#define I40IW_UDA_QPSQ_OPCODE_SHIFT 32
+#define I40IW_UDA_QPSQ_OPCODE_MASK ((u64)0x3f << I40IW_UDA_QPSQ_OPCODE_SHIFT)
+
+#define I40IW_UDA_QPSQ_MACLEN_SHIFT 56
+#define I40IW_UDA_QPSQ_MACLEN_MASK \
+       ((u64)0x7f << I40IW_UDA_QPSQ_MACLEN_SHIFT)
+
+#define I40IW_UDA_QPSQ_IPLEN_SHIFT 48
+#define I40IW_UDA_QPSQ_IPLEN_MASK \
+       ((u64)0x7f << I40IW_UDA_QPSQ_IPLEN_SHIFT)
+
+#define I40IW_UDA_QPSQ_L4T_SHIFT 30
+#define I40IW_UDA_QPSQ_L4T_MASK \
+       ((u64)0x3 << I40IW_UDA_QPSQ_L4T_SHIFT)
+
+#define I40IW_UDA_QPSQ_IIPT_SHIFT 28
+#define I40IW_UDA_QPSQ_IIPT_MASK \
+       ((u64)0x3 << I40IW_UDA_QPSQ_IIPT_SHIFT)
+
+#define I40IW_UDA_QPSQ_L4LEN_SHIFT 24
+#define I40IW_UDA_QPSQ_L4LEN_MASK ((u64)0xf << I40IW_UDA_QPSQ_L4LEN_SHIFT)
+
+#define I40IW_UDA_QPSQ_AVIDX_SHIFT 0
+#define I40IW_UDA_QPSQ_AVIDX_MASK ((u64)0xffff << I40IW_UDA_QPSQ_AVIDX_SHIFT)
+
+#define I40IW_UDA_QPSQ_VALID_SHIFT 63
+#define I40IW_UDA_QPSQ_VALID_MASK \
+       ((u64)0x1 << I40IW_UDA_QPSQ_VALID_SHIFT)
+
+#define I40IW_UDA_QPSQ_SIGCOMPL_SHIFT 62
+#define I40IW_UDA_QPSQ_SIGCOMPL_MASK ((u64)0x1 << I40IW_UDA_QPSQ_SIGCOMPL_SHIFT)
+
+#define I40IW_UDA_PAYLOADLEN_SHIFT 0
+#define I40IW_UDA_PAYLOADLEN_MASK ((u64)0x3fff << I40IW_UDA_PAYLOADLEN_SHIFT)
+
+#define I40IW_UDA_HDRLEN_SHIFT 16
+#define I40IW_UDA_HDRLEN_MASK ((u64)0x1ff << I40IW_UDA_HDRLEN_SHIFT)
+
+#define I40IW_VLAN_TAG_VALID_SHIFT 50
+#define I40IW_VLAN_TAG_VALID_MASK ((u64)0x1 << I40IW_VLAN_TAG_VALID_SHIFT)
+
+#define I40IW_UDA_L3PROTO_SHIFT 0
+#define I40IW_UDA_L3PROTO_MASK ((u64)0x3 << I40IW_UDA_L3PROTO_SHIFT)
+
+#define I40IW_UDA_L4PROTO_SHIFT 16
+#define I40IW_UDA_L4PROTO_MASK ((u64)0x3 << I40IW_UDA_L4PROTO_SHIFT)
+
+#define I40IW_UDA_QPSQ_DOLOOPBACK_SHIFT 44
+#define I40IW_UDA_QPSQ_DOLOOPBACK_MASK \
+       ((u64)0x1 << I40IW_UDA_QPSQ_DOLOOPBACK_SHIFT)
+
+/* CQP SQ WQE common fields */
+#define I40IW_CQPSQ_OPCODE_SHIFT 32
+#define I40IW_CQPSQ_OPCODE_MASK (0x3fULL << I40IW_CQPSQ_OPCODE_SHIFT)
+
+#define I40IW_CQPSQ_WQEVALID_SHIFT 63
+#define I40IW_CQPSQ_WQEVALID_MASK (1ULL << I40IW_CQPSQ_WQEVALID_SHIFT)
+
+#define I40IW_CQPSQ_TPHVAL_SHIFT 0
+#define I40IW_CQPSQ_TPHVAL_MASK (0xffUL << I40IW_CQPSQ_TPHVAL_SHIFT)
+
+#define I40IW_CQPSQ_TPHEN_SHIFT 60
+#define I40IW_CQPSQ_TPHEN_MASK (1ULL << I40IW_CQPSQ_TPHEN_SHIFT)
+
+#define I40IW_CQPSQ_PBUFADDR_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IW_CQPSQ_PBUFADDR_MASK I40IW_CQPHC_QPCTX_MASK
+
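Taken together, the common fields above describe the header quad-word of every CQP SQ WQE: the opcode sits in bits 32..37 and the valid bit in bit 63. A hedged sketch of composing such a header follows; the function and variable names are illustrative, and the real driver appears to drive the valid bit from a ring-polarity flag, which is omitted here.

#include <stdint.h>

/* Sketch: header qword for a CQP command, e.g. Create CQ (0x03). */
static uint64_t cqp_wqe_header(uint64_t opcode, int valid)
{
	uint64_t hdr = 0;

	hdr |= (opcode << I40IW_CQPSQ_OPCODE_SHIFT) &
	       I40IW_CQPSQ_OPCODE_MASK;               /* bits 32..37 */
	if (valid)
		hdr |= I40IW_CQPSQ_WQEVALID_MASK;     /* bit 63 */
	return hdr;
}

/* usage (illustrative): cqp_wqe_header(I40IW_CQP_OP_CREATE_CQ, 1) */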
+/* Create/Modify/Destroy QP */
+
+#define I40IW_CQPSQ_QP_NEWMSS_SHIFT 32
+#define I40IW_CQPSQ_QP_NEWMSS_MASK (0x3fffULL << I40IW_CQPSQ_QP_NEWMSS_SHIFT)
+
+#define I40IW_CQPSQ_QP_TERMLEN_SHIFT 48
+#define I40IW_CQPSQ_QP_TERMLEN_MASK (0xfULL << I40IW_CQPSQ_QP_TERMLEN_SHIFT)
+
+#define I40IW_CQPSQ_QP_QPCTX_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IW_CQPSQ_QP_QPCTX_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IW_CQPSQ_QP_QPID_SHIFT 0
+#define I40IW_CQPSQ_QP_QPID_MASK (0x3FFFFUL)
+/* I40IWCQ_QPID_MASK */
+
+#define I40IW_CQPSQ_QP_OP_SHIFT 32
+#define I40IW_CQPSQ_QP_OP_MASK I40IWCQ_OP_MASK
+
+#define I40IW_CQPSQ_QP_ORDVALID_SHIFT 42
+#define I40IW_CQPSQ_QP_ORDVALID_MASK (1ULL << I40IW_CQPSQ_QP_ORDVALID_SHIFT)
+
+#define I40IW_CQPSQ_QP_TOECTXVALID_SHIFT 43
+#define I40IW_CQPSQ_QP_TOECTXVALID_MASK \
+       (1ULL << I40IW_CQPSQ_QP_TOECTXVALID_SHIFT)
+
+#define I40IW_CQPSQ_QP_CACHEDVARVALID_SHIFT 44
+#define I40IW_CQPSQ_QP_CACHEDVARVALID_MASK      \
+       (1ULL << I40IW_CQPSQ_QP_CACHEDVARVALID_SHIFT)
+
+#define I40IW_CQPSQ_QP_VQ_SHIFT 45
+#define I40IW_CQPSQ_QP_VQ_MASK (1ULL << I40IW_CQPSQ_QP_VQ_SHIFT)
+
+#define I40IW_CQPSQ_QP_FORCELOOPBACK_SHIFT 46
+#define I40IW_CQPSQ_QP_FORCELOOPBACK_MASK       \
+       (1ULL << I40IW_CQPSQ_QP_FORCELOOPBACK_SHIFT)
+
+#define I40IW_CQPSQ_QP_CQNUMVALID_SHIFT 47
+#define I40IW_CQPSQ_QP_CQNUMVALID_MASK  \
+       (1ULL << I40IW_CQPSQ_QP_CQNUMVALID_SHIFT)
+
+#define I40IW_CQPSQ_QP_QPTYPE_SHIFT 48
+#define I40IW_CQPSQ_QP_QPTYPE_MASK (0x3ULL << I40IW_CQPSQ_QP_QPTYPE_SHIFT)
+
+#define I40IW_CQPSQ_QP_MSSCHANGE_SHIFT 52
+#define I40IW_CQPSQ_QP_MSSCHANGE_MASK (1ULL << I40IW_CQPSQ_QP_MSSCHANGE_SHIFT)
+
+#define I40IW_CQPSQ_QP_STATRSRC_SHIFT 53
+#define I40IW_CQPSQ_QP_STATRSRC_MASK (1ULL << I40IW_CQPSQ_QP_STATRSRC_SHIFT)
+
+#define I40IW_CQPSQ_QP_IGNOREMWBOUND_SHIFT 54
+#define I40IW_CQPSQ_QP_IGNOREMWBOUND_MASK       \
+       (1ULL << I40IW_CQPSQ_QP_IGNOREMWBOUND_SHIFT)
+
+#define I40IW_CQPSQ_QP_REMOVEHASHENTRY_SHIFT 55
+#define I40IW_CQPSQ_QP_REMOVEHASHENTRY_MASK     \
+       (1ULL << I40IW_CQPSQ_QP_REMOVEHASHENTRY_SHIFT)
+
+#define I40IW_CQPSQ_QP_TERMACT_SHIFT 56
+#define I40IW_CQPSQ_QP_TERMACT_MASK (0x3ULL << I40IW_CQPSQ_QP_TERMACT_SHIFT)
+
+#define I40IW_CQPSQ_QP_RESETCON_SHIFT 58
+#define I40IW_CQPSQ_QP_RESETCON_MASK (1ULL << I40IW_CQPSQ_QP_RESETCON_SHIFT)
+
+#define I40IW_CQPSQ_QP_ARPTABIDXVALID_SHIFT 59
+#define I40IW_CQPSQ_QP_ARPTABIDXVALID_MASK      \
+       (1ULL << I40IW_CQPSQ_QP_ARPTABIDXVALID_SHIFT)
+
+#define I40IW_CQPSQ_QP_NEXTIWSTATE_SHIFT 60
+#define I40IW_CQPSQ_QP_NEXTIWSTATE_MASK \
+       (0x7ULL << I40IW_CQPSQ_QP_NEXTIWSTATE_SHIFT)
+
+#define I40IW_CQPSQ_QP_DBSHADOWADDR_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IW_CQPSQ_QP_DBSHADOWADDR_MASK I40IW_CQPHC_QPCTX_MASK
+
+/* Create/Modify/Destroy CQ */
+#define I40IW_CQPSQ_CQ_CQSIZE_SHIFT 0
+#define I40IW_CQPSQ_CQ_CQSIZE_MASK (0x3ffffUL << I40IW_CQPSQ_CQ_CQSIZE_SHIFT)
+
+#define I40IW_CQPSQ_CQ_CQCTX_SHIFT 0
+#define I40IW_CQPSQ_CQ_CQCTX_MASK       \
+       (0x7fffffffffffffffULL << I40IW_CQPSQ_CQ_CQCTX_SHIFT)
+
+#define I40IW_CQPSQ_CQ_CQCTX_SHIFT 0
+#define I40IW_CQPSQ_CQ_CQCTX_MASK       \
+       (0x7fffffffffffffffULL << I40IW_CQPSQ_CQ_CQCTX_SHIFT)
+
+#define I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD_SHIFT 0
+#define I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD_MASK       \
+       (0x3ffff << I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD_SHIFT)
+
+#define I40IW_CQPSQ_CQ_CEQID_SHIFT 24
+#define I40IW_CQPSQ_CQ_CEQID_MASK (0x7fUL << I40IW_CQPSQ_CQ_CEQID_SHIFT)
+
+#define I40IW_CQPSQ_CQ_OP_SHIFT 32
+#define I40IW_CQPSQ_CQ_OP_MASK (0x3fULL << I40IW_CQPSQ_CQ_OP_SHIFT)
+
+#define I40IW_CQPSQ_CQ_CQRESIZE_SHIFT 43
+#define I40IW_CQPSQ_CQ_CQRESIZE_MASK (1ULL << I40IW_CQPSQ_CQ_CQRESIZE_SHIFT)
+
+#define I40IW_CQPSQ_CQ_LPBLSIZE_SHIFT 44
+#define I40IW_CQPSQ_CQ_LPBLSIZE_MASK (3ULL << I40IW_CQPSQ_CQ_LPBLSIZE_SHIFT)
+
+#define I40IW_CQPSQ_CQ_CHKOVERFLOW_SHIFT 46
+#define I40IW_CQPSQ_CQ_CHKOVERFLOW_MASK         \
+       (1ULL << I40IW_CQPSQ_CQ_CHKOVERFLOW_SHIFT)
+
+#define I40IW_CQPSQ_CQ_VIRTMAP_SHIFT 47
+#define I40IW_CQPSQ_CQ_VIRTMAP_MASK (1ULL << I40IW_CQPSQ_CQ_VIRTMAP_SHIFT)
+
+#define I40IW_CQPSQ_CQ_ENCEQEMASK_SHIFT 48
+#define I40IW_CQPSQ_CQ_ENCEQEMASK_MASK  \
+       (1ULL << I40IW_CQPSQ_CQ_ENCEQEMASK_SHIFT)
+
+#define I40IW_CQPSQ_CQ_CEQIDVALID_SHIFT 49
+#define I40IW_CQPSQ_CQ_CEQIDVALID_MASK  \
+       (1ULL << I40IW_CQPSQ_CQ_CEQIDVALID_SHIFT)
+
+#define I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT_SHIFT 61
+#define I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT_MASK      \
+       (1ULL << I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT_SHIFT)
+
+/* Create/Modify/Destroy Shared Receive Queue */
+
+#define I40IW_CQPSQ_SRQ_RQSIZE_SHIFT 0
+#define I40IW_CQPSQ_SRQ_RQSIZE_MASK (0xfUL << I40IW_CQPSQ_SRQ_RQSIZE_SHIFT)
+
+#define I40IW_CQPSQ_SRQ_RQWQESIZE_SHIFT 4
+#define I40IW_CQPSQ_SRQ_RQWQESIZE_MASK \
+       (0x7UL << I40IW_CQPSQ_SRQ_RQWQESIZE_SHIFT)
+
+#define I40IW_CQPSQ_SRQ_SRQLIMIT_SHIFT 32
+#define I40IW_CQPSQ_SRQ_SRQLIMIT_MASK   \
+       (0xfffULL << I40IW_CQPSQ_SRQ_SRQLIMIT_SHIFT)
+
+#define I40IW_CQPSQ_SRQ_SRQCTX_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IW_CQPSQ_SRQ_SRQCTX_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IW_CQPSQ_SRQ_PDID_SHIFT 16
+#define I40IW_CQPSQ_SRQ_PDID_MASK       \
+       (0x7fffULL << I40IW_CQPSQ_SRQ_PDID_SHIFT)
+
+#define I40IW_CQPSQ_SRQ_SRQID_SHIFT 0
+#define I40IW_CQPSQ_SRQ_SRQID_MASK (0x7fffUL << I40IW_CQPSQ_SRQ_SRQID_SHIFT)
+
+#define I40IW_CQPSQ_SRQ_LPBLSIZE_SHIFT I40IW_CQPSQ_CQ_LPBLSIZE_SHIFT
+#define I40IW_CQPSQ_SRQ_LPBLSIZE_MASK I40IW_CQPSQ_CQ_LPBLSIZE_MASK
+
+#define I40IW_CQPSQ_SRQ_VIRTMAP_SHIFT I40IW_CQPSQ_CQ_VIRTMAP_SHIFT
+#define I40IW_CQPSQ_SRQ_VIRTMAP_MASK I40IW_CQPSQ_CQ_VIRTMAP_MASK
+
+#define I40IW_CQPSQ_SRQ_TPHEN_SHIFT I40IW_CQPSQ_TPHEN_SHIFT
+#define I40IW_CQPSQ_SRQ_TPHEN_MASK I40IW_CQPSQ_TPHEN_MASK
+
+#define I40IW_CQPSQ_SRQ_ARMLIMITEVENT_SHIFT 61
+#define I40IW_CQPSQ_SRQ_ARMLIMITEVENT_MASK      \
+       (1ULL << I40IW_CQPSQ_SRQ_ARMLIMITEVENT_SHIFT)
+
+#define I40IW_CQPSQ_SRQ_DBSHADOWAREA_SHIFT 6
+#define I40IW_CQPSQ_SRQ_DBSHADOWAREA_MASK       \
+       (0x3ffffffffffffffULL << I40IW_CQPSQ_SRQ_DBSHADOWAREA_SHIFT)
+
+#define I40IW_CQPSQ_SRQ_FIRSTPMPBLIDX_SHIFT 0
+#define I40IW_CQPSQ_SRQ_FIRSTPMPBLIDX_MASK      \
+       (0xfffffffUL << I40IW_CQPSQ_SRQ_FIRSTPMPBLIDX_SHIFT)
+
+/* Allocate/Register/Register Shared/Deallocate Stag */
+#define I40IW_CQPSQ_STAG_VA_FBO_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IW_CQPSQ_STAG_VA_FBO_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IW_CQPSQ_STAG_STAGLEN_SHIFT 0
+#define I40IW_CQPSQ_STAG_STAGLEN_MASK   \
+       (0x3fffffffffffULL << I40IW_CQPSQ_STAG_STAGLEN_SHIFT)
+
+#define I40IW_CQPSQ_STAG_PDID_SHIFT 48
+#define I40IW_CQPSQ_STAG_PDID_MASK (0x7fffULL << I40IW_CQPSQ_STAG_PDID_SHIFT)
+
+#define I40IW_CQPSQ_STAG_KEY_SHIFT 0
+#define I40IW_CQPSQ_STAG_KEY_MASK (0xffUL << I40IW_CQPSQ_STAG_KEY_SHIFT)
+
+#define I40IW_CQPSQ_STAG_IDX_SHIFT 8
+#define I40IW_CQPSQ_STAG_IDX_MASK (0xffffffUL << I40IW_CQPSQ_STAG_IDX_SHIFT)
+
+#define I40IW_CQPSQ_STAG_PARENTSTAGIDX_SHIFT 32
+#define I40IW_CQPSQ_STAG_PARENTSTAGIDX_MASK     \
+       (0xffffffULL << I40IW_CQPSQ_STAG_PARENTSTAGIDX_SHIFT)
+
+#define I40IW_CQPSQ_STAG_MR_SHIFT 43
+#define I40IW_CQPSQ_STAG_MR_MASK (1ULL << I40IW_CQPSQ_STAG_MR_SHIFT)
+
+#define I40IW_CQPSQ_STAG_LPBLSIZE_SHIFT I40IW_CQPSQ_CQ_LPBLSIZE_SHIFT
+#define I40IW_CQPSQ_STAG_LPBLSIZE_MASK I40IW_CQPSQ_CQ_LPBLSIZE_MASK
+
+#define I40IW_CQPSQ_STAG_HPAGESIZE_SHIFT 46
+#define I40IW_CQPSQ_STAG_HPAGESIZE_MASK \
+       (1ULL << I40IW_CQPSQ_STAG_HPAGESIZE_SHIFT)
+
+#define I40IW_CQPSQ_STAG_ARIGHTS_SHIFT 48
+#define I40IW_CQPSQ_STAG_ARIGHTS_MASK   \
+       (0x1fULL << I40IW_CQPSQ_STAG_ARIGHTS_SHIFT)
+
+#define I40IW_CQPSQ_STAG_REMACCENABLED_SHIFT 53
+#define I40IW_CQPSQ_STAG_REMACCENABLED_MASK     \
+       (1ULL << I40IW_CQPSQ_STAG_REMACCENABLED_SHIFT)
+
+#define I40IW_CQPSQ_STAG_VABASEDTO_SHIFT 59
+#define I40IW_CQPSQ_STAG_VABASEDTO_MASK \
+       (1ULL << I40IW_CQPSQ_STAG_VABASEDTO_SHIFT)
+
+#define I40IW_CQPSQ_STAG_USEHMCFNIDX_SHIFT 60
+#define I40IW_CQPSQ_STAG_USEHMCFNIDX_MASK       \
+       (1ULL << I40IW_CQPSQ_STAG_USEHMCFNIDX_SHIFT)
+
+#define I40IW_CQPSQ_STAG_USEPFRID_SHIFT 61
+#define I40IW_CQPSQ_STAG_USEPFRID_MASK  \
+       (1ULL << I40IW_CQPSQ_STAG_USEPFRID_SHIFT)
+
+#define I40IW_CQPSQ_STAG_PBA_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IW_CQPSQ_STAG_PBA_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IW_CQPSQ_STAG_HMCFNIDX_SHIFT 0
+#define I40IW_CQPSQ_STAG_HMCFNIDX_MASK \
+       (0x3fUL << I40IW_CQPSQ_STAG_HMCFNIDX_SHIFT)
+
+#define I40IW_CQPSQ_STAG_FIRSTPMPBLIDX_SHIFT 0
+#define I40IW_CQPSQ_STAG_FIRSTPMPBLIDX_MASK     \
+       (0xfffffffUL << I40IW_CQPSQ_STAG_FIRSTPMPBLIDX_SHIFT)
+
+/* Query stag */
+#define I40IW_CQPSQ_QUERYSTAG_IDX_SHIFT I40IW_CQPSQ_STAG_IDX_SHIFT
+#define I40IW_CQPSQ_QUERYSTAG_IDX_MASK I40IW_CQPSQ_STAG_IDX_MASK
+
+/* Allocate Local IP Address Entry */
+
+/* Manage Local IP Address Table - MLIPA */
+#define I40IW_CQPSQ_MLIPA_IPV6LO_SHIFT  I40IW_CQPHC_QPCTX_SHIFT
+#define I40IW_CQPSQ_MLIPA_IPV6LO_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IW_CQPSQ_MLIPA_IPV6HI_SHIFT  I40IW_CQPHC_QPCTX_SHIFT
+#define I40IW_CQPSQ_MLIPA_IPV6HI_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IW_CQPSQ_MLIPA_IPV4_SHIFT 0
+#define I40IW_CQPSQ_MLIPA_IPV4_MASK \
+       (0xffffffffUL << I40IW_CQPSQ_MLIPA_IPV4_SHIFT)
+
+#define I40IW_CQPSQ_MLIPA_IPTABLEIDX_SHIFT 0
+#define I40IW_CQPSQ_MLIPA_IPTABLEIDX_MASK       \
+       (0x3fUL << I40IW_CQPSQ_MLIPA_IPTABLEIDX_SHIFT)
+
+#define I40IW_CQPSQ_MLIPA_IPV4VALID_SHIFT 42
+#define I40IW_CQPSQ_MLIPA_IPV4VALID_MASK        \
+       (1ULL << I40IW_CQPSQ_MLIPA_IPV4VALID_SHIFT)
+
+#define I40IW_CQPSQ_MLIPA_IPV6VALID_SHIFT 43
+#define I40IW_CQPSQ_MLIPA_IPV6VALID_MASK        \
+       (1ULL << I40IW_CQPSQ_MLIPA_IPV6VALID_SHIFT)
+
+#define I40IW_CQPSQ_MLIPA_FREEENTRY_SHIFT 62
+#define I40IW_CQPSQ_MLIPA_FREEENTRY_MASK        \
+       (1ULL << I40IW_CQPSQ_MLIPA_FREEENTRY_SHIFT)
+
+#define I40IW_CQPSQ_MLIPA_IGNORE_REF_CNT_SHIFT 61
+#define I40IW_CQPSQ_MLIPA_IGNORE_REF_CNT_MASK   \
+       (1ULL << I40IW_CQPSQ_MLIPA_IGNORE_REF_CNT_SHIFT)
+
+#define I40IW_CQPSQ_MLIPA_MAC0_SHIFT 0
+#define I40IW_CQPSQ_MLIPA_MAC0_MASK (0xffUL << I40IW_CQPSQ_MLIPA_MAC0_SHIFT)
+
+#define I40IW_CQPSQ_MLIPA_MAC1_SHIFT 8
+#define I40IW_CQPSQ_MLIPA_MAC1_MASK (0xffUL << I40IW_CQPSQ_MLIPA_MAC1_SHIFT)
+
+#define I40IW_CQPSQ_MLIPA_MAC2_SHIFT 16
+#define I40IW_CQPSQ_MLIPA_MAC2_MASK (0xffUL << I40IW_CQPSQ_MLIPA_MAC2_SHIFT)
+
+#define I40IW_CQPSQ_MLIPA_MAC3_SHIFT 24
+#define I40IW_CQPSQ_MLIPA_MAC3_MASK (0xffUL << I40IW_CQPSQ_MLIPA_MAC3_SHIFT)
+
+#define I40IW_CQPSQ_MLIPA_MAC4_SHIFT 32
+#define I40IW_CQPSQ_MLIPA_MAC4_MASK (0xffULL << I40IW_CQPSQ_MLIPA_MAC4_SHIFT)
+
+#define I40IW_CQPSQ_MLIPA_MAC5_SHIFT 40
+#define I40IW_CQPSQ_MLIPA_MAC5_MASK (0xffULL << I40IW_CQPSQ_MLIPA_MAC5_SHIFT)
+
+/* Manage ARP Table  - MAT */
+#define I40IW_CQPSQ_MAT_REACHMAX_SHIFT 0
+#define I40IW_CQPSQ_MAT_REACHMAX_MASK   \
+       (0xffffffffUL << I40IW_CQPSQ_MAT_REACHMAX_SHIFT)
+
+#define I40IW_CQPSQ_MAT_MACADDR_SHIFT 0
+#define I40IW_CQPSQ_MAT_MACADDR_MASK    \
+       (0xffffffffffffULL << I40IW_CQPSQ_MAT_MACADDR_SHIFT)
+
+#define I40IW_CQPSQ_MAT_ARPENTRYIDX_SHIFT 0
+#define I40IW_CQPSQ_MAT_ARPENTRYIDX_MASK        \
+       (0xfffUL << I40IW_CQPSQ_MAT_ARPENTRYIDX_SHIFT)
+
+#define I40IW_CQPSQ_MAT_ENTRYVALID_SHIFT 42
+#define I40IW_CQPSQ_MAT_ENTRYVALID_MASK \
+       (1ULL << I40IW_CQPSQ_MAT_ENTRYVALID_SHIFT)
+
+#define I40IW_CQPSQ_MAT_PERMANENT_SHIFT 43
+#define I40IW_CQPSQ_MAT_PERMANENT_MASK  \
+       (1ULL << I40IW_CQPSQ_MAT_PERMANENT_SHIFT)
+
+#define I40IW_CQPSQ_MAT_QUERY_SHIFT 44
+#define I40IW_CQPSQ_MAT_QUERY_MASK (1ULL << I40IW_CQPSQ_MAT_QUERY_SHIFT)
+
+/* Manage VF PBLE Backing Pages - MVPBP*/
+#define I40IW_CQPSQ_MVPBP_PD_ENTRY_CNT_SHIFT 0
+#define I40IW_CQPSQ_MVPBP_PD_ENTRY_CNT_MASK \
+       (0x3ffULL << I40IW_CQPSQ_MVPBP_PD_ENTRY_CNT_SHIFT)
+
+#define I40IW_CQPSQ_MVPBP_FIRST_PD_INX_SHIFT 16
+#define I40IW_CQPSQ_MVPBP_FIRST_PD_INX_MASK \
+       (0x1ffULL << I40IW_CQPSQ_MVPBP_FIRST_PD_INX_SHIFT)
+
+#define I40IW_CQPSQ_MVPBP_SD_INX_SHIFT 32
+#define I40IW_CQPSQ_MVPBP_SD_INX_MASK \
+       (0xfffULL << I40IW_CQPSQ_MVPBP_SD_INX_SHIFT)
+
+#define I40IW_CQPSQ_MVPBP_INV_PD_ENT_SHIFT 62
+#define I40IW_CQPSQ_MVPBP_INV_PD_ENT_MASK \
+       (0x1ULL << I40IW_CQPSQ_MVPBP_INV_PD_ENT_SHIFT)
+
+#define I40IW_CQPSQ_MVPBP_PD_PLPBA_SHIFT 3
+#define I40IW_CQPSQ_MVPBP_PD_PLPBA_MASK \
+       (0x1fffffffffffffffULL << I40IW_CQPSQ_MVPBP_PD_PLPBA_SHIFT)
+
+/* Manage Push Page - MPP */
+#define I40IW_INVALID_PUSH_PAGE_INDEX 0xffff
+
+#define I40IW_CQPSQ_MPP_QS_HANDLE_SHIFT 0
+#define I40IW_CQPSQ_MPP_QS_HANDLE_MASK (0xffffUL << \
+                                       I40IW_CQPSQ_MPP_QS_HANDLE_SHIFT)
+
+#define I40IW_CQPSQ_MPP_PPIDX_SHIFT 0
+#define I40IW_CQPSQ_MPP_PPIDX_MASK (0x3ffUL << I40IW_CQPSQ_MPP_PPIDX_SHIFT)
+
+#define I40IW_CQPSQ_MPP_FREE_PAGE_SHIFT 62
+#define I40IW_CQPSQ_MPP_FREE_PAGE_MASK (1ULL << I40IW_CQPSQ_MPP_FREE_PAGE_SHIFT)
+
+/* Upload Context - UCTX */
+#define I40IW_CQPSQ_UCTX_QPCTXADDR_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IW_CQPSQ_UCTX_QPCTXADDR_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IW_CQPSQ_UCTX_QPID_SHIFT 0
+#define I40IW_CQPSQ_UCTX_QPID_MASK (0x3ffffUL << I40IW_CQPSQ_UCTX_QPID_SHIFT)
+
+#define I40IW_CQPSQ_UCTX_QPTYPE_SHIFT 48
+#define I40IW_CQPSQ_UCTX_QPTYPE_MASK (0xfULL << I40IW_CQPSQ_UCTX_QPTYPE_SHIFT)
+
+#define I40IW_CQPSQ_UCTX_RAWFORMAT_SHIFT 61
+#define I40IW_CQPSQ_UCTX_RAWFORMAT_MASK \
+       (1ULL << I40IW_CQPSQ_UCTX_RAWFORMAT_SHIFT)
+
+#define I40IW_CQPSQ_UCTX_FREEZEQP_SHIFT 62
+#define I40IW_CQPSQ_UCTX_FREEZEQP_MASK  \
+       (1ULL << I40IW_CQPSQ_UCTX_FREEZEQP_SHIFT)
+
+/* Manage HMC PM Function Table - MHMC */
+#define I40IW_CQPSQ_MHMC_VFIDX_SHIFT 0
+#define I40IW_CQPSQ_MHMC_VFIDX_MASK (0x7fUL << I40IW_CQPSQ_MHMC_VFIDX_SHIFT)
+
+#define I40IW_CQPSQ_MHMC_FREEPMFN_SHIFT 62
+#define I40IW_CQPSQ_MHMC_FREEPMFN_MASK  \
+       (1ULL << I40IW_CQPSQ_MHMC_FREEPMFN_SHIFT)
+
+/* Set HMC Resource Profile - SHMCRP */
+#define I40IW_CQPSQ_SHMCRP_HMC_PROFILE_SHIFT 0
+#define I40IW_CQPSQ_SHMCRP_HMC_PROFILE_MASK \
+       (0x7ULL << I40IW_CQPSQ_SHMCRP_HMC_PROFILE_SHIFT)
+#define I40IW_CQPSQ_SHMCRP_VFNUM_SHIFT 32
+#define I40IW_CQPSQ_SHMCRP_VFNUM_MASK (0x3fULL << I40IW_CQPSQ_SHMCRP_VFNUM_SHIFT)
+
+/* Create/Destroy CEQ */
+#define I40IW_CQPSQ_CEQ_CEQSIZE_SHIFT 0
+#define I40IW_CQPSQ_CEQ_CEQSIZE_MASK \
+       (0x1ffffUL << I40IW_CQPSQ_CEQ_CEQSIZE_SHIFT)
+
+#define I40IW_CQPSQ_CEQ_CEQID_SHIFT 0
+#define I40IW_CQPSQ_CEQ_CEQID_MASK (0x7fUL << I40IW_CQPSQ_CEQ_CEQID_SHIFT)
+
+#define I40IW_CQPSQ_CEQ_LPBLSIZE_SHIFT I40IW_CQPSQ_CQ_LPBLSIZE_SHIFT
+#define I40IW_CQPSQ_CEQ_LPBLSIZE_MASK I40IW_CQPSQ_CQ_LPBLSIZE_MASK
+
+#define I40IW_CQPSQ_CEQ_VMAP_SHIFT 47
+#define I40IW_CQPSQ_CEQ_VMAP_MASK (1ULL << I40IW_CQPSQ_CEQ_VMAP_SHIFT)
+
+#define I40IW_CQPSQ_CEQ_FIRSTPMPBLIDX_SHIFT 0
+#define I40IW_CQPSQ_CEQ_FIRSTPMPBLIDX_MASK      \
+       (0xfffffffUL << I40IW_CQPSQ_CEQ_FIRSTPMPBLIDX_SHIFT)
+
+/* Create/Destroy AEQ */
+#define I40IW_CQPSQ_AEQ_AEQECNT_SHIFT 0
+#define I40IW_CQPSQ_AEQ_AEQECNT_MASK \
+       (0x7ffffUL << I40IW_CQPSQ_AEQ_AEQECNT_SHIFT)
+
+#define I40IW_CQPSQ_AEQ_LPBLSIZE_SHIFT I40IW_CQPSQ_CQ_LPBLSIZE_SHIFT
+#define I40IW_CQPSQ_AEQ_LPBLSIZE_MASK I40IW_CQPSQ_CQ_LPBLSIZE_MASK
+
+#define I40IW_CQPSQ_AEQ_VMAP_SHIFT 47
+#define I40IW_CQPSQ_AEQ_VMAP_MASK (1ULL << I40IW_CQPSQ_AEQ_VMAP_SHIFT)
+
+#define I40IW_CQPSQ_AEQ_FIRSTPMPBLIDX_SHIFT 0
+#define I40IW_CQPSQ_AEQ_FIRSTPMPBLIDX_MASK      \
+       (0xfffffffUL << I40IW_CQPSQ_AEQ_FIRSTPMPBLIDX_SHIFT)
+
+/* Commit FPM Values - CFPM */
+#define I40IW_CQPSQ_CFPM_HMCFNID_SHIFT 0
+#define I40IW_CQPSQ_CFPM_HMCFNID_MASK (0x3fUL << I40IW_CQPSQ_CFPM_HMCFNID_SHIFT)
+
+/* Flush WQEs - FWQE */
+#define I40IW_CQPSQ_FWQE_AECODE_SHIFT 0
+#define I40IW_CQPSQ_FWQE_AECODE_MASK (0xffffUL << I40IW_CQPSQ_FWQE_AECODE_SHIFT)
+
+#define I40IW_CQPSQ_FWQE_AESOURCE_SHIFT 16
+#define I40IW_CQPSQ_FWQE_AESOURCE_MASK \
+       (0xfUL << I40IW_CQPSQ_FWQE_AESOURCE_SHIFT)
+
+#define I40IW_CQPSQ_FWQE_RQMNERR_SHIFT 0
+#define I40IW_CQPSQ_FWQE_RQMNERR_MASK \
+       (0xffffUL << I40IW_CQPSQ_FWQE_RQMNERR_SHIFT)
+
+#define I40IW_CQPSQ_FWQE_RQMJERR_SHIFT 16
+#define I40IW_CQPSQ_FWQE_RQMJERR_MASK \
+       (0xffffUL << I40IW_CQPSQ_FWQE_RQMJERR_SHIFT)
+
+#define I40IW_CQPSQ_FWQE_SQMNERR_SHIFT 32
+#define I40IW_CQPSQ_FWQE_SQMNERR_MASK   \
+       (0xffffULL << I40IW_CQPSQ_FWQE_SQMNERR_SHIFT)
+
+#define I40IW_CQPSQ_FWQE_SQMJERR_SHIFT 48
+#define I40IW_CQPSQ_FWQE_SQMJERR_MASK   \
+       (0xffffULL << I40IW_CQPSQ_FWQE_SQMJERR_SHIFT)
+
+#define I40IW_CQPSQ_FWQE_QPID_SHIFT 0
+#define I40IW_CQPSQ_FWQE_QPID_MASK (0x3ffffULL << I40IW_CQPSQ_FWQE_QPID_SHIFT)
+
+#define I40IW_CQPSQ_FWQE_GENERATE_AE_SHIFT 59
+#define I40IW_CQPSQ_FWQE_GENERATE_AE_MASK (1ULL <<      \
+                                          I40IW_CQPSQ_FWQE_GENERATE_AE_SHIFT)
+
+#define I40IW_CQPSQ_FWQE_USERFLCODE_SHIFT 60
+#define I40IW_CQPSQ_FWQE_USERFLCODE_MASK        \
+       (1ULL << I40IW_CQPSQ_FWQE_USERFLCODE_SHIFT)
+
+#define I40IW_CQPSQ_FWQE_FLUSHSQ_SHIFT 61
+#define I40IW_CQPSQ_FWQE_FLUSHSQ_MASK (1ULL << I40IW_CQPSQ_FWQE_FLUSHSQ_SHIFT)
+
+#define I40IW_CQPSQ_FWQE_FLUSHRQ_SHIFT 62
+#define I40IW_CQPSQ_FWQE_FLUSHRQ_MASK (1ULL << I40IW_CQPSQ_FWQE_FLUSHRQ_SHIFT)
+
+/* Manage Accelerated Port Table - MAPT */
+#define I40IW_CQPSQ_MAPT_PORT_SHIFT 0
+#define I40IW_CQPSQ_MAPT_PORT_MASK (0xffffUL << I40IW_CQPSQ_MAPT_PORT_SHIFT)
+
+#define I40IW_CQPSQ_MAPT_ADDPORT_SHIFT 62
+#define I40IW_CQPSQ_MAPT_ADDPORT_MASK (1ULL << I40IW_CQPSQ_MAPT_ADDPORT_SHIFT)
+
+/* Update Protocol Engine SDs */
+#define I40IW_CQPSQ_UPESD_SDCMD_SHIFT 0
+#define I40IW_CQPSQ_UPESD_SDCMD_MASK (0xffffffffUL << I40IW_CQPSQ_UPESD_SDCMD_SHIFT)
+
+#define I40IW_CQPSQ_UPESD_SDDATALOW_SHIFT 0
+#define I40IW_CQPSQ_UPESD_SDDATALOW_MASK        \
+       (0xffffffffUL << I40IW_CQPSQ_UPESD_SDDATALOW_SHIFT)
+
+#define I40IW_CQPSQ_UPESD_SDDATAHI_SHIFT 32
+#define I40IW_CQPSQ_UPESD_SDDATAHI_MASK \
+       (0xffffffffULL << I40IW_CQPSQ_UPESD_SDDATAHI_SHIFT)
+#define I40IW_CQPSQ_UPESD_HMCFNID_SHIFT 0
+#define I40IW_CQPSQ_UPESD_HMCFNID_MASK  \
+       (0x3fUL << I40IW_CQPSQ_UPESD_HMCFNID_SHIFT)
+
+#define I40IW_CQPSQ_UPESD_ENTRY_VALID_SHIFT 63
+#define I40IW_CQPSQ_UPESD_ENTRY_VALID_MASK      \
+       ((u64)1 << I40IW_CQPSQ_UPESD_ENTRY_VALID_SHIFT)
+
+#define I40IW_CQPSQ_UPESD_ENTRY_COUNT_SHIFT 0
+#define I40IW_CQPSQ_UPESD_ENTRY_COUNT_MASK      \
+       (0xfUL << I40IW_CQPSQ_UPESD_ENTRY_COUNT_SHIFT)
+
+#define I40IW_CQPSQ_UPESD_SKIP_ENTRY_SHIFT 7
+#define I40IW_CQPSQ_UPESD_SKIP_ENTRY_MASK       \
+       (0x1UL << I40IW_CQPSQ_UPESD_SKIP_ENTRY_SHIFT)
+
+/* Suspend QP */
+#define I40IW_CQPSQ_SUSPENDQP_QPID_SHIFT 0
+#define I40IW_CQPSQ_SUSPENDQP_QPID_MASK (0x3FFFFUL)
+/* I40IWCQ_QPID_MASK */
+
+/* Resume QP */
+#define I40IW_CQPSQ_RESUMEQP_QSHANDLE_SHIFT 0
+#define I40IW_CQPSQ_RESUMEQP_QSHANDLE_MASK      \
+       (0xffffffffUL << I40IW_CQPSQ_RESUMEQP_QSHANDLE_SHIFT)
+
+#define I40IW_CQPSQ_RESUMEQP_QPID_SHIFT 0
+#define I40IW_CQPSQ_RESUMEQP_QPID_MASK (0x3FFFFUL)
+/* I40IWCQ_QPID_MASK */
+
+/* IW QP Context */
+#define I40IWQPC_DDP_VER_SHIFT 0
+#define I40IWQPC_DDP_VER_MASK (3UL << I40IWQPC_DDP_VER_SHIFT)
+
+#define I40IWQPC_SNAP_SHIFT 2
+#define I40IWQPC_SNAP_MASK (1UL << I40IWQPC_SNAP_SHIFT)
+
+#define I40IWQPC_IPV4_SHIFT 3
+#define I40IWQPC_IPV4_MASK (1UL << I40IWQPC_IPV4_SHIFT)
+
+#define I40IWQPC_NONAGLE_SHIFT 4
+#define I40IWQPC_NONAGLE_MASK (1UL << I40IWQPC_NONAGLE_SHIFT)
+
+#define I40IWQPC_INSERTVLANTAG_SHIFT 5
+#define I40IWQPC_INSERTVLANTAG_MASK (1 << I40IWQPC_INSERTVLANTAG_SHIFT)
+
+#define I40IWQPC_USESRQ_SHIFT 6
+#define I40IWQPC_USESRQ_MASK (1UL << I40IWQPC_USESRQ_SHIFT)
+
+#define I40IWQPC_TIMESTAMP_SHIFT 7
+#define I40IWQPC_TIMESTAMP_MASK (1UL << I40IWQPC_TIMESTAMP_SHIFT)
+
+#define I40IWQPC_RQWQESIZE_SHIFT 8
+#define I40IWQPC_RQWQESIZE_MASK (3UL << I40IWQPC_RQWQESIZE_SHIFT)
+
+#define I40IWQPC_INSERTL2TAG2_SHIFT 11
+#define I40IWQPC_INSERTL2TAG2_MASK (1UL << I40IWQPC_INSERTL2TAG2_SHIFT)
+
+#define I40IWQPC_LIMIT_SHIFT 12
+#define I40IWQPC_LIMIT_MASK (3UL << I40IWQPC_LIMIT_SHIFT)
+
+#define I40IWQPC_DROPOOOSEG_SHIFT 15
+#define I40IWQPC_DROPOOOSEG_MASK (1UL << I40IWQPC_DROPOOOSEG_SHIFT)
+
+#define I40IWQPC_DUPACK_THRESH_SHIFT 16
+#define I40IWQPC_DUPACK_THRESH_MASK (7UL << I40IWQPC_DUPACK_THRESH_SHIFT)
+
+#define I40IWQPC_ERR_RQ_IDX_VALID_SHIFT 19
+#define I40IWQPC_ERR_RQ_IDX_VALID_MASK  (1UL << I40IWQPC_ERR_RQ_IDX_VALID_SHIFT)
+
+#define I40IWQPC_DIS_VLAN_CHECKS_SHIFT 19
+#define I40IWQPC_DIS_VLAN_CHECKS_MASK (7UL << I40IWQPC_DIS_VLAN_CHECKS_SHIFT)
+
+#define I40IWQPC_RCVTPHEN_SHIFT 28
+#define I40IWQPC_RCVTPHEN_MASK (1UL << I40IWQPC_RCVTPHEN_SHIFT)
+
+#define I40IWQPC_XMITTPHEN_SHIFT 29
+#define I40IWQPC_XMITTPHEN_MASK (1ULL << I40IWQPC_XMITTPHEN_SHIFT)
+
+#define I40IWQPC_RQTPHEN_SHIFT 30
+#define I40IWQPC_RQTPHEN_MASK (1UL << I40IWQPC_RQTPHEN_SHIFT)
+
+#define I40IWQPC_SQTPHEN_SHIFT 31
+#define I40IWQPC_SQTPHEN_MASK (1ULL << I40IWQPC_SQTPHEN_SHIFT)
+
+#define I40IWQPC_PPIDX_SHIFT 32
+#define I40IWQPC_PPIDX_MASK (0x3ffULL << I40IWQPC_PPIDX_SHIFT)
+
+#define I40IWQPC_PMENA_SHIFT 47
+#define I40IWQPC_PMENA_MASK (1ULL << I40IWQPC_PMENA_SHIFT)
+
+#define I40IWQPC_RDMAP_VER_SHIFT 62
+#define I40IWQPC_RDMAP_VER_MASK (3ULL << I40IWQPC_RDMAP_VER_SHIFT)
+
+#define I40IWQPC_SQADDR_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IWQPC_SQADDR_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IWQPC_RQADDR_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IWQPC_RQADDR_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IWQPC_TTL_SHIFT 0
+#define I40IWQPC_TTL_MASK (0xffUL << I40IWQPC_TTL_SHIFT)
+
+#define I40IWQPC_RQSIZE_SHIFT 8
+#define I40IWQPC_RQSIZE_MASK (0xfUL << I40IWQPC_RQSIZE_SHIFT)
+
+#define I40IWQPC_SQSIZE_SHIFT 12
+#define I40IWQPC_SQSIZE_MASK (0xfUL << I40IWQPC_SQSIZE_SHIFT)
+
+#define I40IWQPC_SRCMACADDRIDX_SHIFT 16
+#define I40IWQPC_SRCMACADDRIDX_MASK (0x3fUL << I40IWQPC_SRCMACADDRIDX_SHIFT)
+
+#define I40IWQPC_AVOIDSTRETCHACK_SHIFT 23
+#define I40IWQPC_AVOIDSTRETCHACK_MASK (1UL << I40IWQPC_AVOIDSTRETCHACK_SHIFT)
+
+#define I40IWQPC_TOS_SHIFT 24
+#define I40IWQPC_TOS_MASK (0xffUL << I40IWQPC_TOS_SHIFT)
+
+#define I40IWQPC_SRCPORTNUM_SHIFT 32
+#define I40IWQPC_SRCPORTNUM_MASK (0xffffULL << I40IWQPC_SRCPORTNUM_SHIFT)
+
+#define I40IWQPC_DESTPORTNUM_SHIFT 48
+#define I40IWQPC_DESTPORTNUM_MASK (0xffffULL << I40IWQPC_DESTPORTNUM_SHIFT)
+
+#define I40IWQPC_DESTIPADDR0_SHIFT 32
+#define I40IWQPC_DESTIPADDR0_MASK       \
+       (0xffffffffULL << I40IWQPC_DESTIPADDR0_SHIFT)
+
+#define I40IWQPC_DESTIPADDR1_SHIFT 0
+#define I40IWQPC_DESTIPADDR1_MASK       \
+       (0xffffffffULL << I40IWQPC_DESTIPADDR1_SHIFT)
+
+#define I40IWQPC_DESTIPADDR2_SHIFT 32
+#define I40IWQPC_DESTIPADDR2_MASK       \
+       (0xffffffffULL << I40IWQPC_DESTIPADDR2_SHIFT)
+
+#define I40IWQPC_DESTIPADDR3_SHIFT 0
+#define I40IWQPC_DESTIPADDR3_MASK       \
+       (0xffffffffULL << I40IWQPC_DESTIPADDR3_SHIFT)
+
+#define I40IWQPC_SNDMSS_SHIFT 16
+#define I40IWQPC_SNDMSS_MASK (0x3fffUL << I40IWQPC_SNDMSS_SHIFT)
+
+#define I40IWQPC_VLANTAG_SHIFT 32
+#define I40IWQPC_VLANTAG_MASK (0xffffULL << I40IWQPC_VLANTAG_SHIFT)
+
+#define I40IWQPC_ARPIDX_SHIFT 48
+#define I40IWQPC_ARPIDX_MASK (0xfffULL << I40IWQPC_ARPIDX_SHIFT)
+
+#define I40IWQPC_FLOWLABEL_SHIFT 0
+#define I40IWQPC_FLOWLABEL_MASK (0xfffffUL << I40IWQPC_FLOWLABEL_SHIFT)
+
+#define I40IWQPC_WSCALE_SHIFT 20
+#define I40IWQPC_WSCALE_MASK (1UL << I40IWQPC_WSCALE_SHIFT)
+
+#define I40IWQPC_KEEPALIVE_SHIFT 21
+#define I40IWQPC_KEEPALIVE_MASK (1UL << I40IWQPC_KEEPALIVE_SHIFT)
+
+#define I40IWQPC_IGNORE_TCP_OPT_SHIFT 22
+#define I40IWQPC_IGNORE_TCP_OPT_MASK (1UL << I40IWQPC_IGNORE_TCP_OPT_SHIFT)
+
+#define I40IWQPC_IGNORE_TCP_UNS_OPT_SHIFT 23
+#define I40IWQPC_IGNORE_TCP_UNS_OPT_MASK        \
+       (1UL << I40IWQPC_IGNORE_TCP_UNS_OPT_SHIFT)
+
+#define I40IWQPC_TCPSTATE_SHIFT 28
+#define I40IWQPC_TCPSTATE_MASK (0xfUL << I40IWQPC_TCPSTATE_SHIFT)
+
+#define I40IWQPC_RCVSCALE_SHIFT 32
+#define I40IWQPC_RCVSCALE_MASK (0xfULL << I40IWQPC_RCVSCALE_SHIFT)
+
+#define I40IWQPC_SNDSCALE_SHIFT 40
+#define I40IWQPC_SNDSCALE_MASK (0xfULL << I40IWQPC_SNDSCALE_SHIFT)
+
+#define I40IWQPC_PDIDX_SHIFT 48
+#define I40IWQPC_PDIDX_MASK (0x7fffULL << I40IWQPC_PDIDX_SHIFT)
+
+#define I40IWQPC_KALIVE_TIMER_MAX_PROBES_SHIFT 16
+#define I40IWQPC_KALIVE_TIMER_MAX_PROBES_MASK   \
+       (0xffUL << I40IWQPC_KALIVE_TIMER_MAX_PROBES_SHIFT)
+
+#define I40IWQPC_KEEPALIVE_INTERVAL_SHIFT 24
+#define I40IWQPC_KEEPALIVE_INTERVAL_MASK        \
+       (0xffUL << I40IWQPC_KEEPALIVE_INTERVAL_SHIFT)
+
+#define I40IWQPC_TIMESTAMP_RECENT_SHIFT 0
+#define I40IWQPC_TIMESTAMP_RECENT_MASK  \
+       (0xffffffffUL << I40IWQPC_TIMESTAMP_RECENT_SHIFT)
+
+#define I40IWQPC_TIMESTAMP_AGE_SHIFT 32
+#define I40IWQPC_TIMESTAMP_AGE_MASK     \
+       (0xffffffffULL << I40IWQPC_TIMESTAMP_AGE_SHIFT)
+
+#define I40IWQPC_SNDNXT_SHIFT 0
+#define I40IWQPC_SNDNXT_MASK (0xffffffffUL << I40IWQPC_SNDNXT_SHIFT)
+
+#define I40IWQPC_SNDWND_SHIFT 32
+#define I40IWQPC_SNDWND_MASK (0xffffffffULL << I40IWQPC_SNDWND_SHIFT)
+
+#define I40IWQPC_RCVNXT_SHIFT 0
+#define I40IWQPC_RCVNXT_MASK (0xffffffffUL << I40IWQPC_RCVNXT_SHIFT)
+
+#define I40IWQPC_RCVWND_SHIFT 32
+#define I40IWQPC_RCVWND_MASK (0xffffffffULL << I40IWQPC_RCVWND_SHIFT)
+
+#define I40IWQPC_SNDMAX_SHIFT 0
+#define I40IWQPC_SNDMAX_MASK (0xffffffffUL << I40IWQPC_SNDMAX_SHIFT)
+
+#define I40IWQPC_SNDUNA_SHIFT 32
+#define I40IWQPC_SNDUNA_MASK (0xffffffffULL << I40IWQPC_SNDUNA_SHIFT)
+
+#define I40IWQPC_SRTT_SHIFT 0
+#define I40IWQPC_SRTT_MASK (0xffffffffUL << I40IWQPC_SRTT_SHIFT)
+
+#define I40IWQPC_RTTVAR_SHIFT 32
+#define I40IWQPC_RTTVAR_MASK (0xffffffffULL << I40IWQPC_RTTVAR_SHIFT)
+
+#define I40IWQPC_SSTHRESH_SHIFT 0
+#define I40IWQPC_SSTHRESH_MASK (0xffffffffUL << I40IWQPC_SSTHRESH_SHIFT)
+
+#define I40IWQPC_CWND_SHIFT 32
+#define I40IWQPC_CWND_MASK (0xffffffffULL << I40IWQPC_CWND_SHIFT)
+
+#define I40IWQPC_SNDWL1_SHIFT 0
+#define I40IWQPC_SNDWL1_MASK (0xffffffffUL << I40IWQPC_SNDWL1_SHIFT)
+
+#define I40IWQPC_SNDWL2_SHIFT 32
+#define I40IWQPC_SNDWL2_MASK (0xffffffffULL << I40IWQPC_SNDWL2_SHIFT)
+
+#define I40IWQPC_ERR_RQ_IDX_SHIFT 32
+#define I40IWQPC_ERR_RQ_IDX_MASK  (0x3fffULL << I40IWQPC_ERR_RQ_IDX_SHIFT)
+
+#define I40IWQPC_MAXSNDWND_SHIFT 0
+#define I40IWQPC_MAXSNDWND_MASK (0xffffffffUL << I40IWQPC_MAXSNDWND_SHIFT)
+
+#define I40IWQPC_REXMIT_THRESH_SHIFT 48
+#define I40IWQPC_REXMIT_THRESH_MASK (0x3fULL << I40IWQPC_REXMIT_THRESH_SHIFT)
+
+#define I40IWQPC_TXCQNUM_SHIFT 0
+#define I40IWQPC_TXCQNUM_MASK (0x1ffffUL << I40IWQPC_TXCQNUM_SHIFT)
+
+#define I40IWQPC_RXCQNUM_SHIFT 32
+#define I40IWQPC_RXCQNUM_MASK (0x1ffffULL << I40IWQPC_RXCQNUM_SHIFT)
+
+#define I40IWQPC_Q2ADDR_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IWQPC_Q2ADDR_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IWQPC_LASTBYTESENT_SHIFT 0
+#define I40IWQPC_LASTBYTESENT_MASK (0xffUL << I40IWQPC_LASTBYTESENT_SHIFT)
+
+#define I40IWQPC_SRQID_SHIFT 32
+#define I40IWQPC_SRQID_MASK (0xffULL << I40IWQPC_SRQID_SHIFT)
+
+#define I40IWQPC_ORDSIZE_SHIFT 0
+#define I40IWQPC_ORDSIZE_MASK (0x7fUL << I40IWQPC_ORDSIZE_SHIFT)
+
+#define I40IWQPC_IRDSIZE_SHIFT 16
+#define I40IWQPC_IRDSIZE_MASK (0x3UL << I40IWQPC_IRDSIZE_SHIFT)
+
+#define I40IWQPC_WRRDRSPOK_SHIFT 20
+#define I40IWQPC_WRRDRSPOK_MASK (1UL << I40IWQPC_WRRDRSPOK_SHIFT)
+
+#define I40IWQPC_RDOK_SHIFT 21
+#define I40IWQPC_RDOK_MASK (1UL << I40IWQPC_RDOK_SHIFT)
+
+#define I40IWQPC_SNDMARKERS_SHIFT 22
+#define I40IWQPC_SNDMARKERS_MASK (1UL << I40IWQPC_SNDMARKERS_SHIFT)
+
+#define I40IWQPC_BINDEN_SHIFT 23
+#define I40IWQPC_BINDEN_MASK (1UL << I40IWQPC_BINDEN_SHIFT)
+
+#define I40IWQPC_FASTREGEN_SHIFT 24
+#define I40IWQPC_FASTREGEN_MASK (1UL << I40IWQPC_FASTREGEN_SHIFT)
+
+#define I40IWQPC_PRIVEN_SHIFT 25
+#define I40IWQPC_PRIVEN_MASK (1UL << I40IWQPC_PRIVEN_SHIFT)
+
+#define I40IWQPC_LSMMPRESENT_SHIFT 26
+#define I40IWQPC_LSMMPRESENT_MASK (1UL << I40IWQPC_LSMMPRESENT_SHIFT)
+
+#define I40IWQPC_ADJUSTFORLSMM_SHIFT 27
+#define I40IWQPC_ADJUSTFORLSMM_MASK (1UL << I40IWQPC_ADJUSTFORLSMM_SHIFT)
+
+#define I40IWQPC_IWARPMODE_SHIFT 28
+#define I40IWQPC_IWARPMODE_MASK (1UL << I40IWQPC_IWARPMODE_SHIFT)
+
+#define I40IWQPC_RCVMARKERS_SHIFT 29
+#define I40IWQPC_RCVMARKERS_MASK (1UL << I40IWQPC_RCVMARKERS_SHIFT)
+
+#define I40IWQPC_ALIGNHDRS_SHIFT 30
+#define I40IWQPC_ALIGNHDRS_MASK (1UL << I40IWQPC_ALIGNHDRS_SHIFT)
+
+#define I40IWQPC_RCVNOMPACRC_SHIFT 31
+#define I40IWQPC_RCVNOMPACRC_MASK (1UL << I40IWQPC_RCVNOMPACRC_SHIFT)
+
+#define I40IWQPC_RCVMARKOFFSET_SHIFT 33
+#define I40IWQPC_RCVMARKOFFSET_MASK (0x1ffULL << I40IWQPC_RCVMARKOFFSET_SHIFT)
+
+#define I40IWQPC_SNDMARKOFFSET_SHIFT 48
+#define I40IWQPC_SNDMARKOFFSET_MASK (0x1ffULL << I40IWQPC_SNDMARKOFFSET_SHIFT)
+
+#define I40IWQPC_QPCOMPCTX_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IWQPC_QPCOMPCTX_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IWQPC_SQTPHVAL_SHIFT 0
+#define I40IWQPC_SQTPHVAL_MASK (0xffUL << I40IWQPC_SQTPHVAL_SHIFT)
+
+#define I40IWQPC_RQTPHVAL_SHIFT 8
+#define I40IWQPC_RQTPHVAL_MASK (0xffUL << I40IWQPC_RQTPHVAL_SHIFT)
+
+#define I40IWQPC_QSHANDLE_SHIFT 16
+#define I40IWQPC_QSHANDLE_MASK (0x3ffUL << I40IWQPC_QSHANDLE_SHIFT)
+
+#define I40IWQPC_EXCEPTION_LAN_QUEUE_SHIFT 32
+#define I40IWQPC_EXCEPTION_LAN_QUEUE_MASK (0xfffULL <<  \
+                                          I40IWQPC_EXCEPTION_LAN_QUEUE_SHIFT)
+
+#define I40IWQPC_LOCAL_IPADDR3_SHIFT 0
+#define I40IWQPC_LOCAL_IPADDR3_MASK \
+       (0xffffffffUL << I40IWQPC_LOCAL_IPADDR3_SHIFT)
+
+#define I40IWQPC_LOCAL_IPADDR2_SHIFT 32
+#define I40IWQPC_LOCAL_IPADDR2_MASK     \
+       (0xffffffffULL << I40IWQPC_LOCAL_IPADDR2_SHIFT)
+
+#define I40IWQPC_LOCAL_IPADDR1_SHIFT 0
+#define I40IWQPC_LOCAL_IPADDR1_MASK     \
+       (0xffffffffUL << I40IWQPC_LOCAL_IPADDR1_SHIFT)
+
+#define I40IWQPC_LOCAL_IPADDR0_SHIFT 32
+#define I40IWQPC_LOCAL_IPADDR0_MASK     \
+       (0xffffffffULL << I40IWQPC_LOCAL_IPADDR0_SHIFT)
+
+/* wqe size considering 32 bytes per wqe*/
+#define I40IWQP_SW_MIN_WQSIZE 4                /* 128 bytes */
+#define I40IWQP_SW_MAX_WQSIZE 16384    /* 524288 bytes */
+
+#define I40IWQP_OP_RDMA_WRITE 0
+#define I40IWQP_OP_RDMA_READ 1
+#define I40IWQP_OP_RDMA_SEND 3
+#define I40IWQP_OP_RDMA_SEND_INV 4
+#define I40IWQP_OP_RDMA_SEND_SOL_EVENT 5
+#define I40IWQP_OP_RDMA_SEND_SOL_EVENT_INV 6
+#define I40IWQP_OP_BIND_MW 8
+#define I40IWQP_OP_FAST_REGISTER 9
+#define I40IWQP_OP_LOCAL_INVALIDATE 10
+#define I40IWQP_OP_RDMA_READ_LOC_INV 11
+#define I40IWQP_OP_NOP 12
+
+#define I40IW_RSVD_SHIFT        41
+#define I40IW_RSVD_MASK (0x7fffULL << I40IW_RSVD_SHIFT)
+
+/* iwarp QP SQ WQE common fields */
+#define I40IWQPSQ_OPCODE_SHIFT 32
+#define I40IWQPSQ_OPCODE_MASK (0x3fULL << I40IWQPSQ_OPCODE_SHIFT)
+
+#define I40IWQPSQ_ADDFRAGCNT_SHIFT 38
+#define I40IWQPSQ_ADDFRAGCNT_MASK (0x7ULL << I40IWQPSQ_ADDFRAGCNT_SHIFT)
+
+#define I40IWQPSQ_PUSHWQE_SHIFT 56
+#define I40IWQPSQ_PUSHWQE_MASK (1ULL << I40IWQPSQ_PUSHWQE_SHIFT)
+
+#define I40IWQPSQ_STREAMMODE_SHIFT 58
+#define I40IWQPSQ_STREAMMODE_MASK (1ULL << I40IWQPSQ_STREAMMODE_SHIFT)
+
+#define I40IWQPSQ_WAITFORRCVPDU_SHIFT 59
+#define I40IWQPSQ_WAITFORRCVPDU_MASK (1ULL << I40IWQPSQ_WAITFORRCVPDU_SHIFT)
+
+#define I40IWQPSQ_READFENCE_SHIFT 60
+#define I40IWQPSQ_READFENCE_MASK (1ULL << I40IWQPSQ_READFENCE_SHIFT)
+
+#define I40IWQPSQ_LOCALFENCE_SHIFT 61
+#define I40IWQPSQ_LOCALFENCE_MASK (1ULL << I40IWQPSQ_LOCALFENCE_SHIFT)
+
+#define I40IWQPSQ_SIGCOMPL_SHIFT 62
+#define I40IWQPSQ_SIGCOMPL_MASK (1ULL << I40IWQPSQ_SIGCOMPL_SHIFT)
+
+#define I40IWQPSQ_VALID_SHIFT 63
+#define I40IWQPSQ_VALID_MASK (1ULL << I40IWQPSQ_VALID_SHIFT)
+
+#define I40IWQPSQ_FRAG_TO_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IWQPSQ_FRAG_TO_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IWQPSQ_FRAG_LEN_SHIFT 0
+#define I40IWQPSQ_FRAG_LEN_MASK (0xffffffffUL << I40IWQPSQ_FRAG_LEN_SHIFT)
+
+#define I40IWQPSQ_FRAG_STAG_SHIFT 32
+#define I40IWQPSQ_FRAG_STAG_MASK (0xffffffffULL << I40IWQPSQ_FRAG_STAG_SHIFT)
+
+#define I40IWQPSQ_REMSTAGINV_SHIFT 0
+#define I40IWQPSQ_REMSTAGINV_MASK (0xffffffffUL << I40IWQPSQ_REMSTAGINV_SHIFT)
+
+#define I40IWQPSQ_INLINEDATAFLAG_SHIFT 57
+#define I40IWQPSQ_INLINEDATAFLAG_MASK (1ULL << I40IWQPSQ_INLINEDATAFLAG_SHIFT)
+
+#define I40IWQPSQ_INLINEDATALEN_SHIFT 48
+#define I40IWQPSQ_INLINEDATALEN_MASK    \
+       (0x7fULL << I40IWQPSQ_INLINEDATALEN_SHIFT)
+
+/* iwarp send with push mode */
+#define I40IWQPSQ_WQDESCIDX_SHIFT 0
+#define I40IWQPSQ_WQDESCIDX_MASK (0x3fffUL << I40IWQPSQ_WQDESCIDX_SHIFT)
+
+/* rdma write */
+#define I40IWQPSQ_REMSTAG_SHIFT 0
+#define I40IWQPSQ_REMSTAG_MASK (0xffffffffUL << I40IWQPSQ_REMSTAG_SHIFT)
+
+#define I40IWQPSQ_REMTO_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IWQPSQ_REMTO_MASK I40IW_CQPHC_QPCTX_MASK
+
+/* memory window */
+#define I40IWQPSQ_STAGRIGHTS_SHIFT 48
+#define I40IWQPSQ_STAGRIGHTS_MASK (0x1fULL << I40IWQPSQ_STAGRIGHTS_SHIFT)
+
+#define I40IWQPSQ_VABASEDTO_SHIFT 53
+#define I40IWQPSQ_VABASEDTO_MASK (1ULL << I40IWQPSQ_VABASEDTO_SHIFT)
+
+#define I40IWQPSQ_MWLEN_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IWQPSQ_MWLEN_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IWQPSQ_PARENTMRSTAG_SHIFT 0
+#define I40IWQPSQ_PARENTMRSTAG_MASK \
+       (0xffffffffUL << I40IWQPSQ_PARENTMRSTAG_SHIFT)
+
+#define I40IWQPSQ_MWSTAG_SHIFT 32
+#define I40IWQPSQ_MWSTAG_MASK (0xffffffffULL << I40IWQPSQ_MWSTAG_SHIFT)
+
+#define I40IWQPSQ_BASEVA_TO_FBO_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IWQPSQ_BASEVA_TO_FBO_MASK I40IW_CQPHC_QPCTX_MASK
+
+/* Local Invalidate */
+#define I40IWQPSQ_LOCSTAG_SHIFT 32
+#define I40IWQPSQ_LOCSTAG_MASK (0xffffffffULL << I40IWQPSQ_LOCSTAG_SHIFT)
+
+/* Fast Register */
+#define I40IWQPSQ_STAGKEY_SHIFT 0
+#define I40IWQPSQ_STAGKEY_MASK (0xffUL << I40IWQPSQ_STAGKEY_SHIFT)
+
+#define I40IWQPSQ_STAGINDEX_SHIFT 8
+#define I40IWQPSQ_STAGINDEX_MASK (0xffffffUL << I40IWQPSQ_STAGINDEX_SHIFT)
+
+#define I40IWQPSQ_COPYHOSTPBLS_SHIFT 43
+#define I40IWQPSQ_COPYHOSTPBLS_MASK (1ULL << I40IWQPSQ_COPYHOSTPBLS_SHIFT)
+
+#define I40IWQPSQ_LPBLSIZE_SHIFT 44
+#define I40IWQPSQ_LPBLSIZE_MASK (3ULL << I40IWQPSQ_LPBLSIZE_SHIFT)
+
+#define I40IWQPSQ_HPAGESIZE_SHIFT 46
+#define I40IWQPSQ_HPAGESIZE_MASK (3ULL << I40IWQPSQ_HPAGESIZE_SHIFT)
+
+#define I40IWQPSQ_STAGLEN_SHIFT 0
+#define I40IWQPSQ_STAGLEN_MASK (0x1ffffffffffULL << I40IWQPSQ_STAGLEN_SHIFT)
+
+#define I40IWQPSQ_FIRSTPMPBLIDXLO_SHIFT 48
+#define I40IWQPSQ_FIRSTPMPBLIDXLO_MASK  \
+       (0xffffULL << I40IWQPSQ_FIRSTPMPBLIDXLO_SHIFT)
+
+#define I40IWQPSQ_FIRSTPMPBLIDXHI_SHIFT 0
+#define I40IWQPSQ_FIRSTPMPBLIDXHI_MASK  \
+       (0xfffUL << I40IWQPSQ_FIRSTPMPBLIDXHI_SHIFT)
+
+#define I40IWQPSQ_PBLADDR_SHIFT 12
+#define I40IWQPSQ_PBLADDR_MASK (0xfffffffffffffULL << I40IWQPSQ_PBLADDR_SHIFT)
+
+/*  iwarp QP RQ WQE common fields */
+#define I40IWQPRQ_ADDFRAGCNT_SHIFT I40IWQPSQ_ADDFRAGCNT_SHIFT
+#define I40IWQPRQ_ADDFRAGCNT_MASK I40IWQPSQ_ADDFRAGCNT_MASK
+
+#define I40IWQPRQ_VALID_SHIFT I40IWQPSQ_VALID_SHIFT
+#define I40IWQPRQ_VALID_MASK I40IWQPSQ_VALID_MASK
+
+#define I40IWQPRQ_COMPLCTX_SHIFT I40IW_CQPHC_QPCTX_SHIFT
+#define I40IWQPRQ_COMPLCTX_MASK I40IW_CQPHC_QPCTX_MASK
+
+#define I40IWQPRQ_FRAG_LEN_SHIFT I40IWQPSQ_FRAG_LEN_SHIFT
+#define I40IWQPRQ_FRAG_LEN_MASK I40IWQPSQ_FRAG_LEN_MASK
+
+#define I40IWQPRQ_STAG_SHIFT I40IWQPSQ_FRAG_STAG_SHIFT
+#define I40IWQPRQ_STAG_MASK I40IWQPSQ_FRAG_STAG_MASK
+
+#define I40IWQPRQ_TO_SHIFT I40IWQPSQ_FRAG_TO_SHIFT
+#define I40IWQPRQ_TO_MASK I40IWQPSQ_FRAG_TO_MASK
+
+/* Query FPM CQP buf */
+#define I40IW_QUERY_FPM_MAX_QPS_SHIFT 0
+#define I40IW_QUERY_FPM_MAX_QPS_MASK               \
+       (0x7ffffUL << I40IW_QUERY_FPM_MAX_QPS_SHIFT)
+
+#define I40IW_QUERY_FPM_MAX_CQS_SHIFT 0
+#define I40IW_QUERY_FPM_MAX_CQS_MASK               \
+       (0x3ffffUL << I40IW_QUERY_FPM_MAX_CQS_SHIFT)
+
+#define I40IW_QUERY_FPM_FIRST_PE_SD_INDEX_SHIFT 0
+#define I40IW_QUERY_FPM_FIRST_PE_SD_INDEX_MASK  \
+       (0x3fffUL << I40IW_QUERY_FPM_FIRST_PE_SD_INDEX_SHIFT)
+
+#define I40IW_QUERY_FPM_MAX_PE_SDS_SHIFT 32
+#define I40IW_QUERY_FPM_MAX_PE_SDS_MASK \
+       (0x3fffULL << I40IW_QUERY_FPM_MAX_PE_SDS_SHIFT)
+
+#define I40IW_QUERY_FPM_MAX_QPS_SHIFT 0
+#define I40IW_QUERY_FPM_MAX_QPS_MASK    \
+       (0x7ffffUL << I40IW_QUERY_FPM_MAX_QPS_SHIFT)
+
+#define I40IW_QUERY_FPM_MAX_CQS_SHIFT 0
+#define I40IW_QUERY_FPM_MAX_CQS_MASK    \
+       (0x3ffffUL << I40IW_QUERY_FPM_MAX_CQS_SHIFT)
+
+#define I40IW_QUERY_FPM_MAX_CEQS_SHIFT 0
+#define I40IW_QUERY_FPM_MAX_CEQS_MASK   \
+       (0xffUL << I40IW_QUERY_FPM_MAX_CEQS_SHIFT)
+
+#define I40IW_QUERY_FPM_XFBLOCKSIZE_SHIFT 32
+#define I40IW_QUERY_FPM_XFBLOCKSIZE_MASK        \
+       (0xffffffffULL << I40IW_QUERY_FPM_XFBLOCKSIZE_SHIFT)
+
+#define I40IW_QUERY_FPM_Q1BLOCKSIZE_SHIFT 32
+#define I40IW_QUERY_FPM_Q1BLOCKSIZE_MASK        \
+       (0xffffffffULL << I40IW_QUERY_FPM_Q1BLOCKSIZE_SHIFT)
+
+#define I40IW_QUERY_FPM_HTMULTIPLIER_SHIFT 16
+#define I40IW_QUERY_FPM_HTMULTIPLIER_MASK       \
+       (0xfUL << I40IW_QUERY_FPM_HTMULTIPLIER_SHIFT)
+
+#define I40IW_QUERY_FPM_TIMERBUCKET_SHIFT 32
+#define I40IW_QUERY_FPM_TIMERBUCKET_MASK        \
+       (0xffFFULL << I40IW_QUERY_FPM_TIMERBUCKET_SHIFT)
+
+/* Static HMC pages allocated buf */
+#define I40IW_SHMC_PAGE_ALLOCATED_HMC_FN_ID_SHIFT 0
+#define I40IW_SHMC_PAGE_ALLOCATED_HMC_FN_ID_MASK        \
+       (0x3fUL << I40IW_SHMC_PAGE_ALLOCATED_HMC_FN_ID_SHIFT)
+
+#define I40IW_HW_PAGE_SIZE     4096
+#define I40IW_DONE_COUNT       1000
+#define I40IW_SLEEP_COUNT      10
+
+enum {
+       I40IW_QUEUES_ALIGNMENT_MASK =           (128 - 1),
+       I40IW_AEQ_ALIGNMENT_MASK =              (256 - 1),
+       I40IW_Q2_ALIGNMENT_MASK =               (256 - 1),
+       I40IW_CEQ_ALIGNMENT_MASK =              (256 - 1),
+       I40IW_CQ0_ALIGNMENT_MASK =              (256 - 1),
+       I40IW_HOST_CTX_ALIGNMENT_MASK =         (4 - 1),
+       I40IW_SHADOWAREA_MASK =                 (128 - 1),
+       I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK =    0,
+       I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK =   0
+};
+
+enum i40iw_alignment {
+       I40IW_CQP_ALIGNMENT =           0x200,
+       I40IW_AEQ_ALIGNMENT =           0x100,
+       I40IW_CEQ_ALIGNMENT =           0x100,
+       I40IW_CQ0_ALIGNMENT =           0x100,
+       I40IW_SD_BUF_ALIGNMENT =        0x100
+};
+
+#define I40IW_QP_WQE_MIN_SIZE  32
+#define I40IW_QP_WQE_MAX_SIZE  128
+
+#define I40IW_CQE_QTYPE_RQ 0
+#define I40IW_CQE_QTYPE_SQ 1
+
+#define I40IW_RING_INIT(_ring, _size) \
+       { \
+               (_ring).head = 0; \
+               (_ring).tail = 0; \
+               (_ring).size = (_size); \
+       }
+#define I40IW_RING_GETSIZE(_ring) ((_ring).size)
+#define I40IW_RING_GETCURRENT_HEAD(_ring) ((_ring).head)
+#define I40IW_RING_GETCURRENT_TAIL(_ring) ((_ring).tail)
+
+#define I40IW_RING_MOVE_HEAD(_ring, _retcode) \
+       { \
+               register u32 size; \
+               size = (_ring).size;  \
+               if (!I40IW_RING_FULL_ERR(_ring)) { \
+                       (_ring).head = ((_ring).head + 1) % size; \
+                       (_retcode) = 0; \
+               } else { \
+                       (_retcode) = I40IW_ERR_RING_FULL; \
+               } \
+       }
+
+#define I40IW_RING_MOVE_HEAD_BY_COUNT(_ring, _count, _retcode) \
+       { \
+               register u32 size; \
+               size = (_ring).size; \
+               if ((I40IW_RING_WORK_AVAILABLE(_ring) + (_count)) < size) { \
+                       (_ring).head = ((_ring).head + (_count)) % size; \
+                       (_retcode) = 0; \
+               } else { \
+                       (_retcode) = I40IW_ERR_RING_FULL; \
+               } \
+       }
+
+#define I40IW_RING_MOVE_TAIL(_ring) \
+       (_ring).tail = ((_ring).tail + 1) % (_ring).size
+
+#define I40IW_RING_MOVE_TAIL_BY_COUNT(_ring, _count) \
+       (_ring).tail = ((_ring).tail + (_count)) % (_ring).size
+
+#define I40IW_RING_SET_TAIL(_ring, _pos) \
+       (_ring).tail = (_pos) % (_ring).size
+
+#define I40IW_RING_FULL_ERR(_ring) \
+       ( \
+               (I40IW_RING_WORK_AVAILABLE(_ring) == ((_ring).size - 1))  \
+       )
+
+#define I40IW_ERR_RING_FULL2(_ring) \
+       ( \
+               (I40IW_RING_WORK_AVAILABLE(_ring) == ((_ring).size - 2))  \
+       )
+
+#define I40IW_ERR_RING_FULL3(_ring) \
+       ( \
+               (I40IW_RING_WORK_AVAILABLE(_ring) == ((_ring).size - 3))  \
+       )
+
+#define I40IW_RING_MORE_WORK(_ring) \
+       ( \
+               (I40IW_RING_WORK_AVAILABLE(_ring) != 0) \
+       )
+
+#define I40IW_RING_WORK_AVAILABLE(_ring) \
+       ( \
+               (((_ring).head + (_ring).size - (_ring).tail) % (_ring).size) \
+       )
+
+#define I40IW_RING_GET_WQES_AVAILABLE(_ring) \
+       ( \
+               ((_ring).size - I40IW_RING_WORK_AVAILABLE(_ring) - 1) \
+       )
+
+#define I40IW_ATOMIC_RING_MOVE_HEAD(_ring, index, _retcode) \
+       { \
+               index = I40IW_RING_GETCURRENT_HEAD(_ring); \
+               I40IW_RING_MOVE_HEAD(_ring, _retcode); \
+       }
+
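The ring helpers above implement a plain head/tail circular buffer: I40IW_RING_WORK_AVAILABLE() counts the posted-but-unconsumed entries as (head + size - tail) % size, and the ring is treated as full at size - 1 entries, so one slot is always left empty to distinguish a full ring from an empty one. A small standalone sketch of the same arithmetic is below; the struct and names are stand-ins, not the driver's types.

#include <stdio.h>
#include <stdint.h>

struct ring { uint32_t head, tail, size; };

/* entries posted (head side) but not yet consumed (tail side) */
static uint32_t work_available(const struct ring *r)
{
	return (r->head + r->size - r->tail) % r->size;
}

int main(void)
{
	struct ring r = { .head = 0, .tail = 0, .size = 8 };

	r.head = (r.head + 5) % r.size;   /* post 5 WQEs */
	r.tail = (r.tail + 2) % r.size;   /* retire 2    */
	printf("outstanding = %u\n", work_available(&r));              /* 3 */
	printf("free slots  = %u\n", r.size - work_available(&r) - 1); /* 4 */
	return 0;
}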
+/* Async Events codes */
+#define I40IW_AE_AMP_UNALLOCATED_STAG                                   0x0102
+#define I40IW_AE_AMP_INVALID_STAG                                       0x0103
+#define I40IW_AE_AMP_BAD_QP                                             0x0104
+#define I40IW_AE_AMP_BAD_PD                                             0x0105
+#define I40IW_AE_AMP_BAD_STAG_KEY                                       0x0106
+#define I40IW_AE_AMP_BAD_STAG_INDEX                                     0x0107
+#define I40IW_AE_AMP_BOUNDS_VIOLATION                                   0x0108
+#define I40IW_AE_AMP_RIGHTS_VIOLATION                                   0x0109
+#define I40IW_AE_AMP_TO_WRAP                                            0x010a
+#define I40IW_AE_AMP_FASTREG_SHARED                                     0x010b
+#define I40IW_AE_AMP_FASTREG_VALID_STAG                                 0x010c
+#define I40IW_AE_AMP_FASTREG_MW_STAG                                    0x010d
+#define I40IW_AE_AMP_FASTREG_INVALID_RIGHTS                             0x010e
+#define I40IW_AE_AMP_FASTREG_PBL_TABLE_OVERFLOW                         0x010f
+#define I40IW_AE_AMP_FASTREG_INVALID_LENGTH                             0x0110
+#define I40IW_AE_AMP_INVALIDATE_SHARED                                  0x0111
+#define I40IW_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS                 0x0112
+#define I40IW_AE_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS                   0x0113
+#define I40IW_AE_AMP_MWBIND_VALID_STAG                                  0x0114
+#define I40IW_AE_AMP_MWBIND_OF_MR_STAG                                  0x0115
+#define I40IW_AE_AMP_MWBIND_TO_ZERO_BASED_STAG                          0x0116
+#define I40IW_AE_AMP_MWBIND_TO_MW_STAG                                  0x0117
+#define I40IW_AE_AMP_MWBIND_INVALID_RIGHTS                              0x0118
+#define I40IW_AE_AMP_MWBIND_INVALID_BOUNDS                              0x0119
+#define I40IW_AE_AMP_MWBIND_TO_INVALID_PARENT                           0x011a
+#define I40IW_AE_AMP_MWBIND_BIND_DISABLED                               0x011b
+#define I40IW_AE_AMP_WQE_INVALID_PARAMETER                              0x0130
+#define I40IW_AE_BAD_CLOSE                                              0x0201
+#define I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE                                0x0202
+#define I40IW_AE_CQ_OPERATION_ERROR                                     0x0203
+#define I40IW_AE_PRIV_OPERATION_DENIED                                  0x011c
+#define I40IW_AE_RDMA_READ_WHILE_ORD_ZERO                               0x0205
+#define I40IW_AE_STAG_ZERO_INVALID                                      0x0206
+#define I40IW_AE_IB_RREQ_AND_Q1_FULL                                    0x0207
+#define I40IW_AE_SRQ_LIMIT                                              0x0209
+#define I40IW_AE_WQE_UNEXPECTED_OPCODE                                  0x020a
+#define I40IW_AE_WQE_INVALID_PARAMETER                                  0x020b
+#define I40IW_AE_WQE_LSMM_TOO_LONG                                      0x0220
+#define I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN                             0x0301
+#define I40IW_AE_DDP_INVALID_MSN_RANGE_IS_NOT_VALID                     0x0302
+#define I40IW_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER      0x0303
+#define I40IW_AE_DDP_UBE_INVALID_DDP_VERSION                            0x0304
+#define I40IW_AE_DDP_UBE_INVALID_MO                                     0x0305
+#define I40IW_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE                0x0306
+#define I40IW_AE_DDP_UBE_INVALID_QN                                     0x0307
+#define I40IW_AE_DDP_NO_L_BIT                                           0x0308
+#define I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION                        0x0311
+#define I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE                            0x0312
+#define I40IW_AE_ROE_INVALID_RDMA_READ_REQUEST                          0x0313
+#define I40IW_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP                    0x0314
+#define I40IW_AE_INVALID_ARP_ENTRY                                      0x0401
+#define I40IW_AE_INVALID_TCP_OPTION_RCVD                                0x0402
+#define I40IW_AE_STALE_ARP_ENTRY                                        0x0403
+#define I40IW_AE_INVALID_WQE_LENGTH                                     0x0404
+#define I40IW_AE_INVALID_MAC_ENTRY                                      0x0405
+#define I40IW_AE_LLP_CLOSE_COMPLETE                                     0x0501
+#define I40IW_AE_LLP_CONNECTION_RESET                                   0x0502
+#define I40IW_AE_LLP_FIN_RECEIVED                                       0x0503
+#define I40IW_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH       0x0504
+#define I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR                             0x0505
+#define I40IW_AE_LLP_SEGMENT_TOO_LARGE                                  0x0506
+#define I40IW_AE_LLP_SEGMENT_TOO_SMALL                                  0x0507
+#define I40IW_AE_LLP_SYN_RECEIVED                                       0x0508
+#define I40IW_AE_LLP_TERMINATE_RECEIVED                                 0x0509
+#define I40IW_AE_LLP_TOO_MANY_RETRIES                                   0x050a
+#define I40IW_AE_LLP_TOO_MANY_KEEPALIVE_RETRIES                         0x050b
+#define I40IW_AE_LLP_DOUBT_REACHABILITY                                 0x050c
+#define I40IW_AE_LLP_RX_VLAN_MISMATCH                                   0x050d
+#define I40IW_AE_RESOURCE_EXHAUSTION                                    0x0520
+#define I40IW_AE_RESET_SENT                                             0x0601
+#define I40IW_AE_TERMINATE_SENT                                         0x0602
+#define I40IW_AE_RESET_NOT_SENT                                         0x0603
+#define I40IW_AE_LCE_QP_CATASTROPHIC                                    0x0700
+#define I40IW_AE_LCE_FUNCTION_CATASTROPHIC                              0x0701
+#define I40IW_AE_LCE_CQ_CATASTROPHIC                                    0x0702
+#define I40IW_AE_UDA_XMIT_FRAG_SEQ                                      0x0800
+#define I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG                                0x0801
+#define I40IW_AE_UDA_XMIT_IPADDR_MISMATCH                               0x0802
+#define I40IW_AE_QP_SUSPEND_COMPLETE                                    0x0900
+
+#define OP_DELETE_LOCAL_MAC_IPADDR_ENTRY        1
+#define OP_CEQ_DESTROY                          2
+#define OP_AEQ_DESTROY                          3
+#define OP_DELETE_ARP_CACHE_ENTRY               4
+#define OP_MANAGE_APBVT_ENTRY                   5
+#define OP_CEQ_CREATE                           6
+#define OP_AEQ_CREATE                           7
+#define OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY         8
+#define OP_ADD_LOCAL_MAC_IPADDR_ENTRY           9
+#define OP_MANAGE_QHASH_TABLE_ENTRY             10
+#define OP_QP_MODIFY                            11
+#define OP_QP_UPLOAD_CONTEXT                    12
+#define OP_CQ_CREATE                            13
+#define OP_CQ_DESTROY                           14
+#define OP_QP_CREATE                            15
+#define OP_QP_DESTROY                           16
+#define OP_ALLOC_STAG                           17
+#define OP_MR_REG_NON_SHARED                    18
+#define OP_DEALLOC_STAG                         19
+#define OP_MW_ALLOC                             20
+#define OP_QP_FLUSH_WQES                        21
+#define OP_ADD_ARP_CACHE_ENTRY                  22
+#define OP_MANAGE_PUSH_PAGE                     23
+#define OP_UPDATE_PE_SDS                        24
+#define OP_MANAGE_HMC_PM_FUNC_TABLE             25
+#define OP_SUSPEND                              26
+#define OP_RESUME                               27
+#define OP_MANAGE_VF_PBLE_BP                    28
+#define OP_QUERY_FPM_VALUES                     29
+#define OP_COMMIT_FPM_VALUES                    30
+#define OP_SIZE_CQP_STAT_ARRAY                  31
+
+#endif
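For orientation while reading the asynchronous event (AE) codes above: the high byte appears to group events by class -- 0x01xx memory-protection and MW-bind errors, 0x02xx close/ORD/WQE errors, 0x03xx DDP, 0x04xx ARP/TCP-option/MAC problems, 0x05xx LLP (TCP) events, 0x06xx reset/terminate sent, 0x07xx catastrophic LCE errors, 0x08xx UDA transmit errors, 0x09xx QP suspend. The minimal sketch below only illustrates that grouping with a hypothetical helper that is not part of this patch; the driver itself switches on individual I40IW_AE_* values (see i40iw_process_aeq() in i40iw_hw.c later in this diff).

/* Hypothetical helper, for illustration only -- not part of this patch. */
static inline u32 i40iw_ae_class(u32 ae_id)
{
	/* 0x01 = AMP/MW-bind, 0x02 = close/ORD/WQE, 0x03 = DDP,
	 * 0x04 = ARP/TCP option/MAC, 0x05 = LLP, 0x06 = reset/terminate,
	 * 0x07 = LCE catastrophic, 0x08 = UDA, 0x09 = QP suspend
	 */
	return ae_id >> 8;
}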
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hmc.c b/drivers/infiniband/hw/i40iw/i40iw_hmc.c
new file mode 100644 (file)
index 0000000..5484cbf
--- /dev/null
@@ -0,0 +1,821 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses.  You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+*   Redistribution and use in source and binary forms, with or
+*   without modification, are permitted provided that the following
+*   conditions are met:
+*
+*    - Redistributions of source code must retain the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer.
+*
+*    - Redistributions in binary form must reproduce the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer in the documentation and/or other materials
+*      provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#include "i40iw_osdep.h"
+#include "i40iw_register.h"
+#include "i40iw_status.h"
+#include "i40iw_hmc.h"
+#include "i40iw_d.h"
+#include "i40iw_type.h"
+#include "i40iw_p.h"
+#include "i40iw_vf.h"
+#include "i40iw_virtchnl.h"
+
+/**
+ * i40iw_find_sd_index_limit - finds segment descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @type: type of HMC resources we're searching
+ * @idx: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @sd_idx: pointer to return index of the segment descriptor in question
+ * @sd_limit: pointer to return the maximum number of segment descriptors
+ *
+ * This function calculates the segment descriptor index and index limit
+ * for the resource defined by i40iw_hmc_rsrc_type.
+ */
+
+static inline void i40iw_find_sd_index_limit(struct i40iw_hmc_info *hmc_info,
+                                            u32 type,
+                                            u32 idx,
+                                            u32 cnt,
+                                            u32 *sd_idx,
+                                            u32 *sd_limit)
+{
+       u64 fpm_addr, fpm_limit;
+
+       fpm_addr = hmc_info->hmc_obj[type].base +
+                       hmc_info->hmc_obj[type].size * idx;
+       fpm_limit = fpm_addr + hmc_info->hmc_obj[type].size * cnt;
+       *sd_idx = (u32)(fpm_addr / I40IW_HMC_DIRECT_BP_SIZE);
+       *sd_limit = (u32)((fpm_limit - 1) / I40IW_HMC_DIRECT_BP_SIZE);
+       *sd_limit += 1;
+}
+
+/**
+ * i40iw_find_pd_index_limit - finds page descriptor index limit
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @type: HMC resource type we're examining
+ * @idx: starting index for the object
+ * @cnt: number of objects we're trying to create
+ * @pd_idx: pointer to return page descriptor index
+ * @pd_limit: pointer to return page descriptor index limit
+ *
+ * Calculates the page descriptor index and index limit for the resource
+ * defined by i40iw_hmc_rsrc_type.
+ */
+
+static inline void i40iw_find_pd_index_limit(struct i40iw_hmc_info *hmc_info,
+                                            u32 type,
+                                            u32 idx,
+                                            u32 cnt,
+                                            u32 *pd_idx,
+                                            u32 *pd_limit)
+{
+       u64 fpm_addr, fpm_limit;
+
+       fpm_addr = hmc_info->hmc_obj[type].base +
+                       hmc_info->hmc_obj[type].size * idx;
+       fpm_limit = fpm_addr + hmc_info->hmc_obj[type].size * cnt;
+       *pd_idx = (u32)(fpm_addr / I40IW_HMC_PAGED_BP_SIZE);
+       *pd_limit = (u32)((fpm_limit - 1) / I40IW_HMC_PAGED_BP_SIZE);
+       *pd_limit += 1;
+}
+
+/**
+ * i40iw_set_sd_entry - setup entry for sd programming
+ * @pa: physical addr
+ * @idx: sd index
+ * @type: paged or direct sd
+ * @entry: sd entry ptr
+ */
+static inline void i40iw_set_sd_entry(u64 pa,
+                                     u32 idx,
+                                     enum i40iw_sd_entry_type type,
+                                     struct update_sd_entry *entry)
+{
+       entry->data = pa | (I40IW_HMC_MAX_BP_COUNT << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
+                       (((type == I40IW_SD_TYPE_PAGED) ? 0 : 1) <<
+                               I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) |
+                       (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT);
+       entry->cmd = (idx | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | (1 << 15));
+}
+
+/**
+ * i40iw_clr_sd_entry - setup entry for sd clear
+ * @idx: sd index
+ * @type: paged or direct sd
+ * @entry: sd entry ptr
+ */
+static inline void i40iw_clr_sd_entry(u32 idx, enum i40iw_sd_entry_type type,
+                                     struct update_sd_entry *entry)
+{
+       entry->data = (I40IW_HMC_MAX_BP_COUNT <<
+                       I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
+                       (((type == I40IW_SD_TYPE_PAGED) ? 0 : 1) <<
+                               I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT);
+       entry->cmd = (idx | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | (1 << 15));
+}
+
+/**
+ * i40iw_hmc_sd_one - setup 1 sd entry for cqp
+ * @dev: pointer to the device structure
+ * @hmc_fn_id: hmc's function id
+ * @pa: physical addr
+ * @sd_idx: sd index
+ * @type: paged or direct sd
+ * @setsd: flag to set or clear sd
+ */
+enum i40iw_status_code i40iw_hmc_sd_one(struct i40iw_sc_dev *dev,
+                                       u8 hmc_fn_id,
+                                       u64 pa, u32 sd_idx,
+                                       enum i40iw_sd_entry_type type,
+                                       bool setsd)
+{
+       struct i40iw_update_sds_info sdinfo;
+
+       sdinfo.cnt = 1;
+       sdinfo.hmc_fn_id = hmc_fn_id;
+       if (setsd)
+               i40iw_set_sd_entry(pa, sd_idx, type, sdinfo.entry);
+       else
+               i40iw_clr_sd_entry(sd_idx, type, sdinfo.entry);
+
+       return dev->cqp->process_cqp_sds(dev, &sdinfo);
+}
+
+/**
+ * i40iw_hmc_sd_grp - setup group of sd entries for cqp
+ * @dev: pointer to the device structure
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @sd_index: sd index
+ * @sd_cnt: number of sd entries
+ * @setsd: flag to set or clear sd
+ */
+static enum i40iw_status_code i40iw_hmc_sd_grp(struct i40iw_sc_dev *dev,
+                                              struct i40iw_hmc_info *hmc_info,
+                                              u32 sd_index,
+                                              u32 sd_cnt,
+                                              bool setsd)
+{
+       struct i40iw_hmc_sd_entry *sd_entry;
+       struct i40iw_update_sds_info sdinfo;
+       u64 pa;
+       u32 i;
+       enum i40iw_status_code ret_code = 0;
+
+       memset(&sdinfo, 0, sizeof(sdinfo));
+       sdinfo.hmc_fn_id = hmc_info->hmc_fn_id;
+       for (i = sd_index; i < sd_index + sd_cnt; i++) {
+               sd_entry = &hmc_info->sd_table.sd_entry[i];
+               if (!sd_entry ||
+                   (!sd_entry->valid && setsd) ||
+                   (sd_entry->valid && !setsd))
+                       continue;
+               if (setsd) {
+                       pa = (sd_entry->entry_type == I40IW_SD_TYPE_PAGED) ?
+                           sd_entry->u.pd_table.pd_page_addr.pa :
+                           sd_entry->u.bp.addr.pa;
+                       i40iw_set_sd_entry(pa, i, sd_entry->entry_type,
+                                          &sdinfo.entry[sdinfo.cnt]);
+               } else {
+                       i40iw_clr_sd_entry(i, sd_entry->entry_type,
+                                          &sdinfo.entry[sdinfo.cnt]);
+               }
+               sdinfo.cnt++;
+               if (sdinfo.cnt == I40IW_MAX_SD_ENTRIES) {
+                       ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);
+                       if (ret_code) {
+                               i40iw_debug(dev, I40IW_DEBUG_HMC,
+                                           "i40iw_hmc_sd_grp: sd_programming failed err=%d\n",
+                                           ret_code);
+                               return ret_code;
+                       }
+                       sdinfo.cnt = 0;
+               }
+       }
+       if (sdinfo.cnt)
+               ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);
+
+       return ret_code;
+}
+
+/**
+ * i40iw_vfdev_from_fpm - return vf dev ptr for hmc function id
+ * @dev: pointer to the device structure
+ * @hmc_fn_id: hmc's function id
+ */
+struct i40iw_vfdev *i40iw_vfdev_from_fpm(struct i40iw_sc_dev *dev, u8 hmc_fn_id)
+{
+       struct i40iw_vfdev *vf_dev = NULL;
+       u16 idx;
+
+       for (idx = 0; idx < I40IW_MAX_PE_ENABLED_VF_COUNT; idx++) {
+               if (dev->vf_dev[idx] &&
+                   ((u8)dev->vf_dev[idx]->pmf_index == hmc_fn_id)) {
+                       vf_dev = dev->vf_dev[idx];
+                       break;
+               }
+       }
+       return vf_dev;
+}
+
+/**
+ * i40iw_vf_hmcinfo_from_fpm - get ptr to hmc for func_id
+ * @dev: pointer to the device structure
+ * @hmc_fn_id: hmc's function id
+ */
+struct i40iw_hmc_info *i40iw_vf_hmcinfo_from_fpm(struct i40iw_sc_dev *dev,
+                                                u8 hmc_fn_id)
+{
+       struct i40iw_hmc_info *hmc_info = NULL;
+       u16 idx;
+
+       for (idx = 0; idx < I40IW_MAX_PE_ENABLED_VF_COUNT; idx++) {
+               if (dev->vf_dev[idx] &&
+                   ((u8)dev->vf_dev[idx]->pmf_index == hmc_fn_id)) {
+                       hmc_info = &dev->vf_dev[idx]->hmc_info;
+                       break;
+               }
+       }
+       return hmc_info;
+}
+
+/**
+ * i40iw_hmc_finish_add_sd_reg - program sd entries for objects
+ * @dev: pointer to the device structure
+ * @info: create obj info
+ */
+static enum i40iw_status_code i40iw_hmc_finish_add_sd_reg(struct i40iw_sc_dev *dev,
+                                                         struct i40iw_hmc_create_obj_info *info)
+{
+       if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
+               return I40IW_ERR_INVALID_HMC_OBJ_INDEX;
+
+       if ((info->start_idx + info->count) >
+                       info->hmc_info->hmc_obj[info->rsrc_type].cnt)
+               return I40IW_ERR_INVALID_HMC_OBJ_COUNT;
+
+       if (!info->add_sd_cnt)
+               return 0;
+
+       return i40iw_hmc_sd_grp(dev, info->hmc_info,
+                               info->hmc_info->sd_indexes[0],
+                               info->add_sd_cnt, true);
+}
+
+/**
+ * i40iw_sc_create_hmc_obj - allocate backing store for hmc objects
+ * @dev: pointer to the device structure
+ * @info: pointer to i40iw_hmc_create_obj_info struct
+ *
+ * This will allocate memory for PDs and backing pages and populate
+ * the sd and pd entries.
+ */
+enum i40iw_status_code i40iw_sc_create_hmc_obj(struct i40iw_sc_dev *dev,
+                                              struct i40iw_hmc_create_obj_info *info)
+{
+       struct i40iw_hmc_sd_entry *sd_entry;
+       u32 sd_idx, sd_lmt;
+       u32 pd_idx = 0, pd_lmt = 0;
+       u32 pd_idx1 = 0, pd_lmt1 = 0;
+       u32 i, j;
+       bool pd_error = false;
+       enum i40iw_status_code ret_code = 0;
+
+       if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
+               return I40IW_ERR_INVALID_HMC_OBJ_INDEX;
+
+       if ((info->start_idx + info->count) >
+           info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+               i40iw_debug(dev, I40IW_DEBUG_HMC,
+                           "%s: error type %u, start = %u, req cnt %u, cnt = %u\n",
+                           __func__, info->rsrc_type, info->start_idx, info->count,
+                           info->hmc_info->hmc_obj[info->rsrc_type].cnt);
+               return I40IW_ERR_INVALID_HMC_OBJ_COUNT;
+       }
+
+       if (!dev->is_pf)
+               return i40iw_vchnl_vf_add_hmc_objs(dev, info->rsrc_type, 0, info->count);
+
+       i40iw_find_sd_index_limit(info->hmc_info, info->rsrc_type,
+                                 info->start_idx, info->count,
+                                 &sd_idx, &sd_lmt);
+       if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
+           sd_lmt > info->hmc_info->sd_table.sd_cnt) {
+               return I40IW_ERR_INVALID_SD_INDEX;
+       }
+       i40iw_find_pd_index_limit(info->hmc_info, info->rsrc_type,
+                                 info->start_idx, info->count, &pd_idx, &pd_lmt);
+
+       for (j = sd_idx; j < sd_lmt; j++) {
+               ret_code = i40iw_add_sd_table_entry(dev->hw, info->hmc_info,
+                                                   j,
+                                                   info->entry_type,
+                                                   I40IW_HMC_DIRECT_BP_SIZE);
+               if (ret_code)
+                       goto exit_sd_error;
+               sd_entry = &info->hmc_info->sd_table.sd_entry[j];
+
+               if ((sd_entry->entry_type == I40IW_SD_TYPE_PAGED) &&
+                   ((dev->hmc_info == info->hmc_info) &&
+                    (info->rsrc_type != I40IW_HMC_IW_PBLE))) {
+                       pd_idx1 = max(pd_idx, (j * I40IW_HMC_MAX_BP_COUNT));
+                       pd_lmt1 = min(pd_lmt,
+                                     (j + 1) * I40IW_HMC_MAX_BP_COUNT);
+                       for (i = pd_idx1; i < pd_lmt1; i++) {
+                               /* update the pd table entry */
+                               ret_code = i40iw_add_pd_table_entry(dev->hw, info->hmc_info,
+                                                                   i, NULL);
+                               if (ret_code) {
+                                       pd_error = true;
+                                       break;
+                               }
+                       }
+                       if (pd_error) {
+                               while (i && (i > pd_idx1)) {
+                                       i40iw_remove_pd_bp(dev->hw, info->hmc_info, (i - 1),
+                                                          info->is_pf);
+                                       i--;
+                               }
+                       }
+               }
+               if (sd_entry->valid)
+                       continue;
+
+               info->hmc_info->sd_indexes[info->add_sd_cnt] = (u16)j;
+               info->add_sd_cnt++;
+               sd_entry->valid = true;
+       }
+       return i40iw_hmc_finish_add_sd_reg(dev, info);
+
+exit_sd_error:
+       while (j && (j > sd_idx)) {
+               sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
+               switch (sd_entry->entry_type) {
+               case I40IW_SD_TYPE_PAGED:
+                       pd_idx1 = max(pd_idx,
+                                     (j - 1) * I40IW_HMC_MAX_BP_COUNT);
+                       pd_lmt1 = min(pd_lmt, (j * I40IW_HMC_MAX_BP_COUNT));
+                       for (i = pd_idx1; i < pd_lmt1; i++)
+                               i40iw_prep_remove_pd_page(info->hmc_info, i);
+                       break;
+               case I40IW_SD_TYPE_DIRECT:
+                       i40iw_prep_remove_pd_page(info->hmc_info, (j - 1));
+                       break;
+               default:
+                       ret_code = I40IW_ERR_INVALID_SD_TYPE;
+                       break;
+               }
+               j--;
+       }
+
+       return ret_code;
+}
+
+/**
+ * i40iw_finish_del_sd_reg - delete sd entries for objects
+ * @dev: pointer to the device structure
+ * @info: delete obj info
+ * @reset: true if called before reset
+ */
+static enum i40iw_status_code i40iw_finish_del_sd_reg(struct i40iw_sc_dev *dev,
+                                                     struct i40iw_hmc_del_obj_info *info,
+                                                     bool reset)
+{
+       struct i40iw_hmc_sd_entry *sd_entry;
+       enum i40iw_status_code ret_code = 0;
+       u32 i, sd_idx;
+       struct i40iw_dma_mem *mem;
+
+       if (dev->is_pf && !reset)
+               ret_code = i40iw_hmc_sd_grp(dev, info->hmc_info,
+                                           info->hmc_info->sd_indexes[0],
+                                           info->del_sd_cnt, false);
+
+       if (ret_code)
+               i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error cqp sd sd_grp\n", __func__);
+
+       for (i = 0; i < info->del_sd_cnt; i++) {
+               sd_idx = info->hmc_info->sd_indexes[i];
+               sd_entry = &info->hmc_info->sd_table.sd_entry[sd_idx];
+               if (!sd_entry)
+                       continue;
+               mem = (sd_entry->entry_type == I40IW_SD_TYPE_PAGED) ?
+                       &sd_entry->u.pd_table.pd_page_addr :
+                       &sd_entry->u.bp.addr;
+
+               if (!mem || !mem->va)
+                       i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error cqp sd mem\n", __func__);
+               else
+                       i40iw_free_dma_mem(dev->hw, mem);
+       }
+       return ret_code;
+}
+
+/**
+ * i40iw_sc_del_hmc_obj - remove pe hmc objects
+ * @dev: pointer to the device structure
+ * @info: pointer to i40iw_hmc_del_obj_info struct
+ * @reset: true if called before reset
+ *
+ * This will de-populate the SDs and PDs.  It frees
+ * the memory for PDs and backing storage.  After this function returns,
+ * the caller should deallocate the memory previously allocated for
+ * book-keeping information about PDs and backing storage.
+ */
+enum i40iw_status_code i40iw_sc_del_hmc_obj(struct i40iw_sc_dev *dev,
+                                           struct i40iw_hmc_del_obj_info *info,
+                                           bool reset)
+{
+       struct i40iw_hmc_pd_table *pd_table;
+       u32 sd_idx, sd_lmt;
+       u32 pd_idx, pd_lmt, rel_pd_idx;
+       u32 i, j;
+       enum i40iw_status_code ret_code = 0;
+
+       if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+               i40iw_debug(dev, I40IW_DEBUG_HMC,
+                           "%s: error start_idx[%04d]  >= [type %04d].cnt[%04d]\n",
+                           __func__, info->start_idx, info->rsrc_type,
+                           info->hmc_info->hmc_obj[info->rsrc_type].cnt);
+               return I40IW_ERR_INVALID_HMC_OBJ_INDEX;
+       }
+
+       if ((info->start_idx + info->count) >
+           info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
+               i40iw_debug(dev, I40IW_DEBUG_HMC,
+                           "%s: error start_idx[%04d] + count %04d  >= [type %04d].cnt[%04d]\n",
+                           __func__, info->start_idx, info->count,
+                           info->rsrc_type,
+                           info->hmc_info->hmc_obj[info->rsrc_type].cnt);
+               return I40IW_ERR_INVALID_HMC_OBJ_COUNT;
+       }
+       if (!dev->is_pf) {
+               ret_code = i40iw_vchnl_vf_del_hmc_obj(dev, info->rsrc_type, 0,
+                                                     info->count);
+               if (info->rsrc_type != I40IW_HMC_IW_PBLE)
+                       return ret_code;
+       }
+
+       i40iw_find_pd_index_limit(info->hmc_info, info->rsrc_type,
+                                 info->start_idx, info->count, &pd_idx, &pd_lmt);
+
+       for (j = pd_idx; j < pd_lmt; j++) {
+               sd_idx = j / I40IW_HMC_PD_CNT_IN_SD;
+
+               if (info->hmc_info->sd_table.sd_entry[sd_idx].entry_type !=
+                   I40IW_SD_TYPE_PAGED)
+                       continue;
+
+               rel_pd_idx = j % I40IW_HMC_PD_CNT_IN_SD;
+               pd_table = &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+               if (pd_table->pd_entry[rel_pd_idx].valid) {
+                       ret_code = i40iw_remove_pd_bp(dev->hw, info->hmc_info, j,
+                                                     info->is_pf);
+                       if (ret_code) {
+                               i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error\n", __func__);
+                               return ret_code;
+                       }
+               }
+       }
+
+       i40iw_find_sd_index_limit(info->hmc_info, info->rsrc_type,
+                                 info->start_idx, info->count, &sd_idx, &sd_lmt);
+       if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
+           sd_lmt > info->hmc_info->sd_table.sd_cnt) {
+               i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error invalid sd_idx\n", __func__);
+               return I40IW_ERR_INVALID_SD_INDEX;
+       }
+
+       for (i = sd_idx; i < sd_lmt; i++) {
+               if (!info->hmc_info->sd_table.sd_entry[i].valid)
+                       continue;
+               switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
+               case I40IW_SD_TYPE_DIRECT:
+                       ret_code = i40iw_prep_remove_sd_bp(info->hmc_info, i);
+                       if (!ret_code) {
+                               info->hmc_info->sd_indexes[info->del_sd_cnt] = (u16)i;
+                               info->del_sd_cnt++;
+                       }
+                       break;
+               case I40IW_SD_TYPE_PAGED:
+                       ret_code = i40iw_prep_remove_pd_page(info->hmc_info, i);
+                       if (!ret_code) {
+                               info->hmc_info->sd_indexes[info->del_sd_cnt] = (u16)i;
+                               info->del_sd_cnt++;
+                       }
+                       break;
+               default:
+                       break;
+               }
+       }
+       return i40iw_finish_del_sd_reg(dev, info, reset);
+}
+
+/**
+ * i40iw_add_sd_table_entry - Adds a segment descriptor to the table
+ * @hw: pointer to our hw struct
+ * @hmc_info: pointer to the HMC configuration information struct
+ * @sd_index: segment descriptor index to manipulate
+ * @type: what type of segment descriptor we're manipulating
+ * @direct_mode_sz: size to alloc in direct mode
+ */
+enum i40iw_status_code i40iw_add_sd_table_entry(struct i40iw_hw *hw,
+                                               struct i40iw_hmc_info *hmc_info,
+                                               u32 sd_index,
+                                               enum i40iw_sd_entry_type type,
+                                               u64 direct_mode_sz)
+{
+       enum i40iw_status_code ret_code = 0;
+       struct i40iw_hmc_sd_entry *sd_entry;
+       bool dma_mem_alloc_done = false;
+       struct i40iw_dma_mem mem;
+       u64 alloc_len;
+
+       sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
+       if (!sd_entry->valid) {
+               if (type == I40IW_SD_TYPE_PAGED)
+                       alloc_len = I40IW_HMC_PAGED_BP_SIZE;
+               else
+                       alloc_len = direct_mode_sz;
+
+               /* allocate a 4K pd page or 2M backing page */
+               ret_code = i40iw_allocate_dma_mem(hw, &mem, alloc_len,
+                                                 I40IW_HMC_PD_BP_BUF_ALIGNMENT);
+               if (ret_code)
+                       goto exit;
+               dma_mem_alloc_done = true;
+               if (type == I40IW_SD_TYPE_PAGED) {
+                       ret_code = i40iw_allocate_virt_mem(hw,
+                                                          &sd_entry->u.pd_table.pd_entry_virt_mem,
+                                                          sizeof(struct i40iw_hmc_pd_entry) * 512);
+                       if (ret_code)
+                               goto exit;
+                       sd_entry->u.pd_table.pd_entry = (struct i40iw_hmc_pd_entry *)
+                                                        sd_entry->u.pd_table.pd_entry_virt_mem.va;
+
+                       memcpy(&sd_entry->u.pd_table.pd_page_addr, &mem, sizeof(struct i40iw_dma_mem));
+               } else {
+                       memcpy(&sd_entry->u.bp.addr, &mem, sizeof(struct i40iw_dma_mem));
+                       sd_entry->u.bp.sd_pd_index = sd_index;
+               }
+
+               hmc_info->sd_table.sd_entry[sd_index].entry_type = type;
+
+               I40IW_INC_SD_REFCNT(&hmc_info->sd_table);
+       }
+       if (sd_entry->entry_type == I40IW_SD_TYPE_DIRECT)
+               I40IW_INC_BP_REFCNT(&sd_entry->u.bp);
+exit:
+       if (ret_code)
+               if (dma_mem_alloc_done)
+                       i40iw_free_dma_mem(hw, &mem);
+
+       return ret_code;
+}
+
+/**
+ * i40iw_add_pd_table_entry - Adds page descriptor to the specified table
+ * @hw: pointer to our HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @pd_index: which page descriptor index to manipulate
+ * @rsrc_pg: if not NULL, use a preallocated page instead of allocating a new one.
+ *
+ * This function:
+ *     1. Initializes the pd entry
+ *     2. Adds the pd_entry to the pd_table
+ *     3. Marks the entry valid in the i40iw_hmc_pd_entry structure
+ *     4. Initializes the pd_entry's ref count to 1
+ * assumptions:
+ *     1. The memory for the pd should be pinned down, physically contiguous,
+ *        zeroed, and aligned on a 4K boundary.
+ *     2. It should be 4K in size.
+ */
+enum i40iw_status_code i40iw_add_pd_table_entry(struct i40iw_hw *hw,
+                                               struct i40iw_hmc_info *hmc_info,
+                                               u32 pd_index,
+                                               struct i40iw_dma_mem *rsrc_pg)
+{
+       enum i40iw_status_code ret_code = 0;
+       struct i40iw_hmc_pd_table *pd_table;
+       struct i40iw_hmc_pd_entry *pd_entry;
+       struct i40iw_dma_mem mem;
+       struct i40iw_dma_mem *page = &mem;
+       u32 sd_idx, rel_pd_idx;
+       u64 *pd_addr;
+       u64 page_desc;
+
+       if (pd_index / I40IW_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt)
+               return I40IW_ERR_INVALID_PAGE_DESC_INDEX;
+
+       sd_idx = (pd_index / I40IW_HMC_PD_CNT_IN_SD);
+       if (hmc_info->sd_table.sd_entry[sd_idx].entry_type != I40IW_SD_TYPE_PAGED)
+               return 0;
+
+       rel_pd_idx = (pd_index % I40IW_HMC_PD_CNT_IN_SD);
+       pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+       pd_entry = &pd_table->pd_entry[rel_pd_idx];
+       if (!pd_entry->valid) {
+               if (rsrc_pg) {
+                       pd_entry->rsrc_pg = true;
+                       page = rsrc_pg;
+               } else {
+                       ret_code = i40iw_allocate_dma_mem(hw, page,
+                                                         I40IW_HMC_PAGED_BP_SIZE,
+                                                         I40IW_HMC_PD_BP_BUF_ALIGNMENT);
+                       if (ret_code)
+                               return ret_code;
+                       pd_entry->rsrc_pg = false;
+               }
+
+               memcpy(&pd_entry->bp.addr, page, sizeof(struct i40iw_dma_mem));
+               pd_entry->bp.sd_pd_index = pd_index;
+               pd_entry->bp.entry_type = I40IW_SD_TYPE_PAGED;
+               page_desc = page->pa | 0x1;
+
+               pd_addr = (u64 *)pd_table->pd_page_addr.va;
+               pd_addr += rel_pd_idx;
+
+               memcpy(pd_addr, &page_desc, sizeof(*pd_addr));
+
+               pd_entry->sd_index = sd_idx;
+               pd_entry->valid = true;
+               I40IW_INC_PD_REFCNT(pd_table);
+               if (hmc_info->hmc_fn_id < I40IW_FIRST_VF_FPM_ID)
+                       I40IW_INVALIDATE_PF_HMC_PD(hw, sd_idx, rel_pd_idx);
+               else if (hw->hmc.hmc_fn_id != hmc_info->hmc_fn_id)
+                       I40IW_INVALIDATE_VF_HMC_PD(hw, sd_idx, rel_pd_idx,
+                                                  hmc_info->hmc_fn_id);
+       }
+       I40IW_INC_BP_REFCNT(&pd_entry->bp);
+
+       return 0;
+}
+
+/**
+ * i40iw_remove_pd_bp - remove a backing page from a page descriptor
+ * @hw: pointer to our HW structure
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ * @is_pf: distinguishes a VF from a PF
+ *
+ * This function:
+ *     1. Marks the entry in the pd table (for paged address mode) or in the
+ *        sd table (for direct address mode) as invalid.
+ *     2. Writes to register PMPDINV to invalidate the backing page in FV cache
+ *     3. Decrements the ref count for the pd_entry
+ * assumptions:
+ *     1. Caller can deallocate the memory used by backing storage after this
+ *        function returns.
+ */
+enum i40iw_status_code i40iw_remove_pd_bp(struct i40iw_hw *hw,
+                                         struct i40iw_hmc_info *hmc_info,
+                                         u32 idx,
+                                         bool is_pf)
+{
+       struct i40iw_hmc_pd_entry *pd_entry;
+       struct i40iw_hmc_pd_table *pd_table;
+       struct i40iw_hmc_sd_entry *sd_entry;
+       u32 sd_idx, rel_pd_idx;
+       struct i40iw_dma_mem *mem;
+       u64 *pd_addr;
+
+       sd_idx = idx / I40IW_HMC_PD_CNT_IN_SD;
+       rel_pd_idx = idx % I40IW_HMC_PD_CNT_IN_SD;
+       if (sd_idx >= hmc_info->sd_table.sd_cnt)
+               return I40IW_ERR_INVALID_PAGE_DESC_INDEX;
+
+       sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
+       if (sd_entry->entry_type != I40IW_SD_TYPE_PAGED)
+               return I40IW_ERR_INVALID_SD_TYPE;
+
+       pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
+       pd_entry = &pd_table->pd_entry[rel_pd_idx];
+       I40IW_DEC_BP_REFCNT(&pd_entry->bp);
+       if (pd_entry->bp.ref_cnt)
+               return 0;
+
+       pd_entry->valid = false;
+       I40IW_DEC_PD_REFCNT(pd_table);
+       pd_addr = (u64 *)pd_table->pd_page_addr.va;
+       pd_addr += rel_pd_idx;
+       memset(pd_addr, 0, sizeof(u64));
+       if (is_pf)
+               I40IW_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
+       else
+               I40IW_INVALIDATE_VF_HMC_PD(hw, sd_idx, idx,
+                                          hmc_info->hmc_fn_id);
+
+       if (!pd_entry->rsrc_pg) {
+               mem = &pd_entry->bp.addr;
+               if (!mem || !mem->va)
+                       return I40IW_ERR_PARAM;
+               i40iw_free_dma_mem(hw, mem);
+       }
+       if (!pd_table->ref_cnt)
+               i40iw_free_virt_mem(hw, &pd_table->pd_entry_virt_mem);
+
+       return 0;
+}
+
+/**
+ * i40iw_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: the page index
+ */
+enum i40iw_status_code i40iw_prep_remove_sd_bp(struct i40iw_hmc_info *hmc_info, u32 idx)
+{
+       struct i40iw_hmc_sd_entry *sd_entry;
+
+       sd_entry = &hmc_info->sd_table.sd_entry[idx];
+       I40IW_DEC_BP_REFCNT(&sd_entry->u.bp);
+       if (sd_entry->u.bp.ref_cnt)
+               return I40IW_ERR_NOT_READY;
+
+       I40IW_DEC_SD_REFCNT(&hmc_info->sd_table);
+       sd_entry->valid = false;
+
+       return 0;
+}
+
+/**
+ * i40iw_prep_remove_pd_page - Prepares to remove a PD page from an sd entry.
+ * @hmc_info: pointer to the HMC configuration information structure
+ * @idx: segment descriptor index to find the relevant page descriptor
+ */
+enum i40iw_status_code i40iw_prep_remove_pd_page(struct i40iw_hmc_info *hmc_info,
+                                                u32 idx)
+{
+       struct i40iw_hmc_sd_entry *sd_entry;
+
+       sd_entry = &hmc_info->sd_table.sd_entry[idx];
+
+       if (sd_entry->u.pd_table.ref_cnt)
+               return I40IW_ERR_NOT_READY;
+
+       sd_entry->valid = false;
+       I40IW_DEC_SD_REFCNT(&hmc_info->sd_table);
+
+       return 0;
+}
+
+/**
+ * i40iw_pf_init_vfhmc - initialize hmc_info for a vf driver instance
+ * @dev: pointer to the i40iw_sc_dev struct
+ * @vf_hmc_fn_id: hmc function id for the vf driver
+ * @vf_cnt_array: array of cnt values of iwarp hmc objects
+ *
+ * Called by pf driver to initialize hmc_info for vf driver instance.
+ */
+enum i40iw_status_code i40iw_pf_init_vfhmc(struct i40iw_sc_dev *dev,
+                                          u8 vf_hmc_fn_id,
+                                          u32 *vf_cnt_array)
+{
+       struct i40iw_hmc_info *hmc_info;
+       enum i40iw_status_code ret_code = 0;
+       u32 i;
+
+       if ((vf_hmc_fn_id < I40IW_FIRST_VF_FPM_ID) ||
+           (vf_hmc_fn_id >= I40IW_FIRST_VF_FPM_ID +
+            I40IW_MAX_PE_ENABLED_VF_COUNT)) {
+               i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: invalid vf_hmc_fn_id  0x%x\n",
+                           __func__, vf_hmc_fn_id);
+               return I40IW_ERR_INVALID_HMCFN_ID;
+       }
+
+       ret_code = i40iw_sc_init_iw_hmc(dev, vf_hmc_fn_id);
+       if (ret_code)
+               return ret_code;
+
+       hmc_info = i40iw_vf_hmcinfo_from_fpm(dev, vf_hmc_fn_id);
+
+       for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++)
+               if (vf_cnt_array)
+                       hmc_info->hmc_obj[i].cnt =
+                           vf_cnt_array[i - I40IW_HMC_IW_QP];
+               else
+                       hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;
+
+       return 0;
+}
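To make the create/delete entry points above easier to follow, here is a minimal caller sketch -- an illustration only, not part of this patch. It assumes an already-initialized i40iw_sc_dev whose hmc_info and object counts have been set up; the field names follow struct i40iw_hmc_create_obj_info as declared in i40iw_hmc.h below.

/* Illustration only: allocate direct backing store for all QP objects. */
static enum i40iw_status_code create_qp_backing_store(struct i40iw_sc_dev *dev,
						       bool is_pf)
{
	struct i40iw_hmc_create_obj_info info;

	memset(&info, 0, sizeof(info));
	info.hmc_info = dev->hmc_info;
	info.rsrc_type = I40IW_HMC_IW_QP;          /* HMC object type to back */
	info.start_idx = 0;                        /* first object index */
	info.count = dev->hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt;
	info.entry_type = I40IW_SD_TYPE_DIRECT;    /* 2MB direct backing pages */
	info.is_pf = is_pf;

	return i40iw_sc_create_hmc_obj(dev, &info);
}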
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hmc.h b/drivers/infiniband/hw/i40iw/i40iw_hmc.h
new file mode 100644 (file)
index 0000000..4c3fdd8
--- /dev/null
@@ -0,0 +1,241 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses.  You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+*   Redistribution and use in source and binary forms, with or
+*   without modification, are permitted provided that the following
+*   conditions are met:
+*
+*    - Redistributions of source code must retain the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer.
+*
+*    - Redistributions in binary form must reproduce the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer in the documentation and/or other materials
+*      provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_HMC_H
+#define I40IW_HMC_H
+
+#include "i40iw_d.h"
+
+struct i40iw_hw;
+enum i40iw_status_code;
+
+#define I40IW_HMC_MAX_BP_COUNT 512
+#define I40IW_MAX_SD_ENTRIES 11
+#define I40IW_HW_DBG_HMC_INVALID_BP_MARK     0xCA
+
+#define I40IW_HMC_INFO_SIGNATURE       0x484D5347
+#define I40IW_HMC_PD_CNT_IN_SD         512
+#define I40IW_HMC_DIRECT_BP_SIZE       0x200000
+#define I40IW_HMC_MAX_SD_COUNT         4096
+#define I40IW_HMC_PAGED_BP_SIZE                4096
+#define I40IW_HMC_PD_BP_BUF_ALIGNMENT  4096
+#define I40IW_FIRST_VF_FPM_ID          16
+#define FPM_MULTIPLIER                 1024
+
+#define I40IW_INC_SD_REFCNT(sd_table)   ((sd_table)->ref_cnt++)
+#define I40IW_INC_PD_REFCNT(pd_table)   ((pd_table)->ref_cnt++)
+#define I40IW_INC_BP_REFCNT(bp)         ((bp)->ref_cnt++)
+
+#define I40IW_DEC_SD_REFCNT(sd_table)   ((sd_table)->ref_cnt--)
+#define I40IW_DEC_PD_REFCNT(pd_table)   ((pd_table)->ref_cnt--)
+#define I40IW_DEC_BP_REFCNT(bp)         ((bp)->ref_cnt--)
+
+/**
+ * I40IW_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_idx: segment descriptor index
+ * @pd_idx: page descriptor index
+ */
+#define I40IW_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx)                  \
+       i40iw_wr32((hw), I40E_PFHMC_PDINV,                                    \
+               (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) |             \
+               (0x1 << I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT) | \
+               ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
+
+/**
+ * I40IW_INVALIDATE_VF_HMC_PD - Invalidates the pd cache in the hardware
+ * @hw: pointer to our hw struct
+ * @sd_idx: segment descriptor index
+ * @pd_idx: page descriptor index
+ * @hmc_fn_id: VF's function id
+ */
+#define I40IW_INVALIDATE_VF_HMC_PD(hw, sd_idx, pd_idx, hmc_fn_id)        \
+       i40iw_wr32(hw, I40E_GLHMC_VFPDINV(hmc_fn_id - I40IW_FIRST_VF_FPM_ID),  \
+            ((sd_idx << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) |              \
+             (pd_idx << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
+
+struct i40iw_hmc_obj_info {
+       u64 base;
+       u32 max_cnt;
+       u32 cnt;
+       u64 size;
+};
+
+enum i40iw_sd_entry_type {
+       I40IW_SD_TYPE_INVALID = 0,
+       I40IW_SD_TYPE_PAGED = 1,
+       I40IW_SD_TYPE_DIRECT = 2
+};
+
+struct i40iw_hmc_bp {
+       enum i40iw_sd_entry_type entry_type;
+       struct i40iw_dma_mem addr;
+       u32 sd_pd_index;
+       u32 ref_cnt;
+};
+
+struct i40iw_hmc_pd_entry {
+       struct i40iw_hmc_bp bp;
+       u32 sd_index;
+       bool rsrc_pg;
+       bool valid;
+};
+
+struct i40iw_hmc_pd_table {
+       struct i40iw_dma_mem pd_page_addr;
+       struct i40iw_hmc_pd_entry *pd_entry;
+       struct i40iw_virt_mem pd_entry_virt_mem;
+       u32 ref_cnt;
+       u32 sd_index;
+};
+
+struct i40iw_hmc_sd_entry {
+       enum i40iw_sd_entry_type entry_type;
+       bool valid;
+
+       union {
+               struct i40iw_hmc_pd_table pd_table;
+               struct i40iw_hmc_bp bp;
+       } u;
+};
+
+struct i40iw_hmc_sd_table {
+       struct i40iw_virt_mem addr;
+       u32 sd_cnt;
+       u32 ref_cnt;
+       struct i40iw_hmc_sd_entry *sd_entry;
+};
+
+struct i40iw_hmc_info {
+       u32 signature;
+       u8 hmc_fn_id;
+       u16 first_sd_index;
+
+       struct i40iw_hmc_obj_info *hmc_obj;
+       struct i40iw_virt_mem hmc_obj_virt_mem;
+       struct i40iw_hmc_sd_table sd_table;
+       u16 sd_indexes[I40IW_HMC_MAX_SD_COUNT];
+};
+
+struct update_sd_entry {
+       u64 cmd;
+       u64 data;
+};
+
+struct i40iw_update_sds_info {
+       u32 cnt;
+       u8 hmc_fn_id;
+       struct update_sd_entry entry[I40IW_MAX_SD_ENTRIES];
+};
+
+struct i40iw_ccq_cqe_info;
+struct i40iw_hmc_fcn_info {
+       void (*callback_fcn)(struct i40iw_sc_dev *, void *,
+                            struct i40iw_ccq_cqe_info *);
+       void *cqp_callback_param;
+       u32 vf_id;
+       u16 iw_vf_idx;
+       bool free_fcn;
+};
+
+enum i40iw_hmc_rsrc_type {
+       I40IW_HMC_IW_QP = 0,
+       I40IW_HMC_IW_CQ = 1,
+       I40IW_HMC_IW_SRQ = 2,
+       I40IW_HMC_IW_HTE = 3,
+       I40IW_HMC_IW_ARP = 4,
+       I40IW_HMC_IW_APBVT_ENTRY = 5,
+       I40IW_HMC_IW_MR = 6,
+       I40IW_HMC_IW_XF = 7,
+       I40IW_HMC_IW_XFFL = 8,
+       I40IW_HMC_IW_Q1 = 9,
+       I40IW_HMC_IW_Q1FL = 10,
+       I40IW_HMC_IW_TIMER = 11,
+       I40IW_HMC_IW_FSIMC = 12,
+       I40IW_HMC_IW_FSIAV = 13,
+       I40IW_HMC_IW_PBLE = 14,
+       I40IW_HMC_IW_MAX = 15,
+};
+
+struct i40iw_hmc_create_obj_info {
+       struct i40iw_hmc_info *hmc_info;
+       struct i40iw_virt_mem add_sd_virt_mem;
+       u32 rsrc_type;
+       u32 start_idx;
+       u32 count;
+       u32 add_sd_cnt;
+       enum i40iw_sd_entry_type entry_type;
+       bool is_pf;
+};
+
+struct i40iw_hmc_del_obj_info {
+       struct i40iw_hmc_info *hmc_info;
+       struct i40iw_virt_mem del_sd_virt_mem;
+       u32 rsrc_type;
+       u32 start_idx;
+       u32 count;
+       u32 del_sd_cnt;
+       bool is_pf;
+};
+
+enum i40iw_status_code i40iw_copy_dma_mem(struct i40iw_hw *hw, void *dest_buf,
+                                         struct i40iw_dma_mem *src_mem, u64 src_offset, u64 size);
+enum i40iw_status_code i40iw_sc_create_hmc_obj(struct i40iw_sc_dev *dev,
+                                              struct i40iw_hmc_create_obj_info *info);
+enum i40iw_status_code i40iw_sc_del_hmc_obj(struct i40iw_sc_dev *dev,
+                                           struct i40iw_hmc_del_obj_info *info,
+                                           bool reset);
+enum i40iw_status_code i40iw_hmc_sd_one(struct i40iw_sc_dev *dev, u8 hmc_fn_id,
+                                       u64 pa, u32 sd_idx, enum i40iw_sd_entry_type type,
+                                       bool setsd);
+enum i40iw_status_code i40iw_update_sds_noccq(struct i40iw_sc_dev *dev,
+                                             struct i40iw_update_sds_info *info);
+struct i40iw_vfdev *i40iw_vfdev_from_fpm(struct i40iw_sc_dev *dev, u8 hmc_fn_id);
+struct i40iw_hmc_info *i40iw_vf_hmcinfo_from_fpm(struct i40iw_sc_dev *dev,
+                                                u8 hmc_fn_id);
+enum i40iw_status_code i40iw_add_sd_table_entry(struct i40iw_hw *hw,
+                                               struct i40iw_hmc_info *hmc_info, u32 sd_index,
+                                               enum i40iw_sd_entry_type type, u64 direct_mode_sz);
+enum i40iw_status_code i40iw_add_pd_table_entry(struct i40iw_hw *hw,
+                                               struct i40iw_hmc_info *hmc_info, u32 pd_index,
+                                               struct i40iw_dma_mem *rsrc_pg);
+enum i40iw_status_code i40iw_remove_pd_bp(struct i40iw_hw *hw,
+                                         struct i40iw_hmc_info *hmc_info, u32 idx, bool is_pf);
+enum i40iw_status_code i40iw_prep_remove_sd_bp(struct i40iw_hmc_info *hmc_info, u32 idx);
+enum i40iw_status_code i40iw_prep_remove_pd_page(struct i40iw_hmc_info *hmc_info, u32 idx);
+
+#define     ENTER_SHARED_FUNCTION()
+#define     EXIT_SHARED_FUNCTION()
+
+#endif                         /* I40IW_HMC_H */
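A quick worked example of the address arithmetic these constants imply (the numbers below are chosen for illustration and are not taken from the patch): one segment descriptor covers I40IW_HMC_PD_CNT_IN_SD * I40IW_HMC_PAGED_BP_SIZE = 512 * 4096 bytes = 2MB = I40IW_HMC_DIRECT_BP_SIZE of FPM space, whether it is backed by a single direct 2MB page or by a table of 512 paged 4KB backing pages. That is why i40iw_find_sd_index_limit() divides FPM addresses by the direct size while i40iw_find_pd_index_limit() divides by the paged size.

/*
 * Worked example (illustrative values only):
 *   hmc_obj[type].base = 0x300000, .size = 0x100, idx = 0x20
 *   fpm_addr   = 0x300000 + 0x100 * 0x20               = 0x302000
 *   sd_idx     = 0x302000 / I40IW_HMC_DIRECT_BP_SIZE   = 1
 *   pd_idx     = 0x302000 / I40IW_HMC_PAGED_BP_SIZE    = 0x302 (770)
 *   rel_pd_idx = 770 % I40IW_HMC_PD_CNT_IN_SD          = 258
 *   i.e. the object starts in SD 1, at PD 258 within that SD.
 */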
diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
new file mode 100644 (file)
index 0000000..9fd3024
--- /dev/null
@@ -0,0 +1,730 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses.  You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+*   Redistribution and use in source and binary forms, with or
+*   without modification, are permitted provided that the following
+*   conditions are met:
+*
+*    - Redistributions of source code must retain the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer.
+*
+*    - Redistributions in binary form must reproduce the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer in the documentation and/or other materials
+*      provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/if_vlan.h>
+
+#include "i40iw.h"
+
+/**
+ * i40iw_initialize_hw_resources - initialize hw resources during open
+ * @iwdev: iwarp device
+ */
+u32 i40iw_initialize_hw_resources(struct i40iw_device *iwdev)
+{
+       unsigned long num_pds;
+       u32 resources_size;
+       u32 max_mr;
+       u32 max_qp;
+       u32 max_cq;
+       u32 arp_table_size;
+       u32 mrdrvbits;
+       void *resource_ptr;
+
+       max_qp = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt;
+       max_cq = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
+       max_mr = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt;
+       arp_table_size = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_ARP].cnt;
+       iwdev->max_cqe = 0xFFFFF;
+       num_pds = max_qp * 4;
+       resources_size = sizeof(struct i40iw_arp_entry) * arp_table_size;
+       resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_qp);
+       resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_mr);
+       resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_cq);
+       resources_size += sizeof(unsigned long) * BITS_TO_LONGS(num_pds);
+       resources_size += sizeof(unsigned long) * BITS_TO_LONGS(arp_table_size);
+       resources_size += sizeof(struct i40iw_qp **) * max_qp;
+       iwdev->mem_resources = kzalloc(resources_size, GFP_KERNEL);
+
+       if (!iwdev->mem_resources)
+               return -ENOMEM;
+
+       iwdev->max_qp = max_qp;
+       iwdev->max_mr = max_mr;
+       iwdev->max_cq = max_cq;
+       iwdev->max_pd = num_pds;
+       iwdev->arp_table_size = arp_table_size;
+       iwdev->arp_table = (struct i40iw_arp_entry *)iwdev->mem_resources;
+       resource_ptr = iwdev->mem_resources + (sizeof(struct i40iw_arp_entry) * arp_table_size);
+
+       iwdev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY |
+           IB_DEVICE_MEM_WINDOW | IB_DEVICE_MEM_MGT_EXTENSIONS;
+
+       iwdev->allocated_qps = resource_ptr;
+       iwdev->allocated_cqs = &iwdev->allocated_qps[BITS_TO_LONGS(max_qp)];
+       iwdev->allocated_mrs = &iwdev->allocated_cqs[BITS_TO_LONGS(max_cq)];
+       iwdev->allocated_pds = &iwdev->allocated_mrs[BITS_TO_LONGS(max_mr)];
+       iwdev->allocated_arps = &iwdev->allocated_pds[BITS_TO_LONGS(num_pds)];
+       iwdev->qp_table = (struct i40iw_qp **)(&iwdev->allocated_arps[BITS_TO_LONGS(arp_table_size)]);
+       set_bit(0, iwdev->allocated_mrs);
+       set_bit(0, iwdev->allocated_qps);
+       set_bit(0, iwdev->allocated_cqs);
+       set_bit(0, iwdev->allocated_pds);
+       set_bit(0, iwdev->allocated_arps);
+
+       /* Following for ILQ/IEQ */
+       set_bit(1, iwdev->allocated_qps);
+       set_bit(1, iwdev->allocated_cqs);
+       set_bit(1, iwdev->allocated_pds);
+       set_bit(2, iwdev->allocated_cqs);
+       set_bit(2, iwdev->allocated_pds);
+
+       spin_lock_init(&iwdev->resource_lock);
+       mrdrvbits = 24 - get_count_order(iwdev->max_mr);
+       iwdev->mr_stagmask = ~(((1 << mrdrvbits) - 1) << (32 - mrdrvbits));
+       return 0;
+}
+
+/**
+ * i40iw_cqp_ce_handler - handle cqp completions
+ * @iwdev: iwarp device
+ * @cq: cq for cqp completions
+ * @arm: flag to arm after completions
+ */
+static void i40iw_cqp_ce_handler(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq, bool arm)
+{
+       struct i40iw_cqp_request *cqp_request;
+       struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+       u32 cqe_count = 0;
+       struct i40iw_ccq_cqe_info info;
+       int ret;
+
+       do {
+               memset(&info, 0, sizeof(info));
+               ret = dev->ccq_ops->ccq_get_cqe_info(cq, &info);
+               if (ret)
+                       break;
+               cqp_request = (struct i40iw_cqp_request *)(unsigned long)info.scratch;
+               if (info.error)
+                       i40iw_pr_err("opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\n",
+                                    info.op_code, info.maj_err_code, info.min_err_code);
+               if (cqp_request) {
+                       cqp_request->compl_info.maj_err_code = info.maj_err_code;
+                       cqp_request->compl_info.min_err_code = info.min_err_code;
+                       cqp_request->compl_info.op_ret_val = info.op_ret_val;
+                       cqp_request->compl_info.error = info.error;
+
+                       if (cqp_request->waiting) {
+                               cqp_request->request_done = true;
+                               wake_up(&cqp_request->waitq);
+                               i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
+                       } else {
+                               if (cqp_request->callback_fcn)
+                                       cqp_request->callback_fcn(cqp_request, 1);
+                               i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
+                       }
+               }
+
+               cqe_count++;
+       } while (1);
+
+       if (arm && cqe_count) {
+               i40iw_process_bh(dev);
+               dev->ccq_ops->ccq_arm(cq);
+       }
+}
+
+/**
+ * i40iw_iwarp_ce_handler - handle iwarp completions
+ * @iwdev: iwarp device
+ * @iwcq: iwarp cq receiving event
+ */
+static void i40iw_iwarp_ce_handler(struct i40iw_device *iwdev,
+                                  struct i40iw_sc_cq *iwcq)
+{
+       struct i40iw_cq *i40iwcq = iwcq->back_cq;
+
+       if (i40iwcq->ibcq.comp_handler)
+               i40iwcq->ibcq.comp_handler(&i40iwcq->ibcq,
+                                          i40iwcq->ibcq.cq_context);
+}
+
+/**
+ * i40iw_puda_ce_handler - handle puda completion events
+ * @iwdev: iwarp device
+ * @cq: puda completion q for event
+ */
+static void i40iw_puda_ce_handler(struct i40iw_device *iwdev,
+                                 struct i40iw_sc_cq *cq)
+{
+       struct i40iw_sc_dev *dev = (struct i40iw_sc_dev *)&iwdev->sc_dev;
+       enum i40iw_status_code status;
+       u32 compl_error;
+
+       do {
+               status = i40iw_puda_poll_completion(dev, cq, &compl_error);
+               if (status == I40IW_ERR_QUEUE_EMPTY)
+                       break;
+               if (status) {
+                       i40iw_pr_err("puda  status = %d\n", status);
+                       break;
+               }
+               if (compl_error) {
+                       i40iw_pr_err("puda compl_err  =0x%x\n", compl_error);
+                       break;
+               }
+       } while (1);
+
+       dev->ccq_ops->ccq_arm(cq);
+}
+
+/**
+ * i40iw_process_ceq - handle ceq for completions
+ * @iwdev: iwarp device
+ * @ceq: ceq having cq for completion
+ */
+void i40iw_process_ceq(struct i40iw_device *iwdev, struct i40iw_ceq *ceq)
+{
+       struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+       struct i40iw_sc_ceq *sc_ceq;
+       struct i40iw_sc_cq *cq;
+       bool arm = true;
+
+       sc_ceq = &ceq->sc_ceq;
+       do {
+               cq = dev->ceq_ops->process_ceq(dev, sc_ceq);
+               if (!cq)
+                       break;
+
+               if (cq->cq_type == I40IW_CQ_TYPE_CQP)
+                       i40iw_cqp_ce_handler(iwdev, cq, arm);
+               else if (cq->cq_type == I40IW_CQ_TYPE_IWARP)
+                       i40iw_iwarp_ce_handler(iwdev, cq);
+               else if ((cq->cq_type == I40IW_CQ_TYPE_ILQ) ||
+                        (cq->cq_type == I40IW_CQ_TYPE_IEQ))
+                       i40iw_puda_ce_handler(iwdev, cq);
+       } while (1);
+}
+
+/**
+ * i40iw_next_iw_state - modify qp state
+ * @iwqp: iwarp qp to modify
+ * @state: next state for qp
+ * @del_hash: flag to remove the hash table entry
+ * @term: terminate message flags
+ * @termlen: length of the terminate message
+ */
+void i40iw_next_iw_state(struct i40iw_qp *iwqp,
+                        u8 state,
+                        u8 del_hash,
+                        u8 term,
+                        u8 termlen)
+{
+       struct i40iw_modify_qp_info info;
+
+       memset(&info, 0, sizeof(info));
+       info.next_iwarp_state = state;
+       info.remove_hash_idx = del_hash;
+       info.cq_num_valid = true;
+       info.arp_cache_idx_valid = true;
+       info.dont_send_term = true;
+       info.dont_send_fin = true;
+       info.termlen = termlen;
+
+       if (term & I40IWQP_TERM_SEND_TERM_ONLY)
+               info.dont_send_term = false;
+       if (term & I40IWQP_TERM_SEND_FIN_ONLY)
+               info.dont_send_fin = false;
+       if (iwqp->sc_qp.term_flags && (state == I40IW_QP_STATE_ERROR))
+               info.reset_tcp_conn = true;
+       i40iw_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0);
+}
+
+/**
+ * i40iw_process_aeq - handle aeq events
+ * @iwdev: iwarp device
+ */
+void i40iw_process_aeq(struct i40iw_device *iwdev)
+{
+       struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+       struct i40iw_aeq *aeq = &iwdev->aeq;
+       struct i40iw_sc_aeq *sc_aeq = &aeq->sc_aeq;
+       struct i40iw_aeqe_info aeinfo;
+       struct i40iw_aeqe_info *info = &aeinfo;
+       int ret;
+       struct i40iw_qp *iwqp = NULL;
+       struct i40iw_sc_cq *cq = NULL;
+       struct i40iw_cq *iwcq = NULL;
+       struct i40iw_sc_qp *qp = NULL;
+       struct i40iw_qp_host_ctx_info *ctx_info = NULL;
+       unsigned long flags;
+
+       u32 aeqcnt = 0;
+
+       if (!sc_aeq->size)
+               return;
+
+       do {
+               memset(info, 0, sizeof(*info));
+               ret = dev->aeq_ops->get_next_aeqe(sc_aeq, info);
+               if (ret)
+                       break;
+
+               aeqcnt++;
+               i40iw_debug(dev, I40IW_DEBUG_AEQ,
+                           "%s ae_id = 0x%x bool qp=%d qp_id = %d\n",
+                           __func__, info->ae_id, info->qp, info->qp_cq_id);
+               if (info->qp) {
+                       iwqp = iwdev->qp_table[info->qp_cq_id];
+                       if (!iwqp) {
+                               i40iw_pr_err("qp_id %d is already freed\n", info->qp_cq_id);
+                               continue;
+                       }
+                       qp = &iwqp->sc_qp;
+                       spin_lock_irqsave(&iwqp->lock, flags);
+                       iwqp->hw_tcp_state = info->tcp_state;
+                       iwqp->hw_iwarp_state = info->iwarp_state;
+                       iwqp->last_aeq = info->ae_id;
+                       spin_unlock_irqrestore(&iwqp->lock, flags);
+                       ctx_info = &iwqp->ctx_info;
+                       ctx_info->err_rq_idx_valid = true;
+               } else {
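+                       /* without an associated qp, only cq operation errors are handled */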
+                       if (info->ae_id != I40IW_AE_CQ_OPERATION_ERROR)
+                               continue;
+               }
+
+               switch (info->ae_id) {
+               case I40IW_AE_LLP_FIN_RECEIVED:
+                       if (qp->term_flags)
+                               continue;
+                       if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
+                               iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSE_WAIT;
+                               if ((iwqp->hw_tcp_state == I40IW_TCP_STATE_CLOSE_WAIT) &&
+                                   (iwqp->ibqp_state == IB_QPS_RTS)) {
+                                       i40iw_next_iw_state(iwqp,
+                                                           I40IW_QP_STATE_CLOSING, 0, 0, 0);
+                                       i40iw_cm_disconn(iwqp);
+                               }
+                               iwqp->cm_id->add_ref(iwqp->cm_id);
+                               i40iw_schedule_cm_timer(iwqp->cm_node,
+                                                       (struct i40iw_puda_buf *)iwqp,
+                                                       I40IW_TIMER_TYPE_CLOSE, 1, 0);
+                       }
+                       break;
+               case I40IW_AE_LLP_CLOSE_COMPLETE:
+                       if (qp->term_flags)
+                               i40iw_terminate_done(qp, 0);
+                       else
+                               i40iw_cm_disconn(iwqp);
+                       break;
+               case I40IW_AE_RESET_SENT:
+                       i40iw_next_iw_state(iwqp, I40IW_QP_STATE_ERROR, 1, 0, 0);
+                       i40iw_cm_disconn(iwqp);
+                       break;
+               case I40IW_AE_LLP_CONNECTION_RESET:
+                       if (atomic_read(&iwqp->close_timer_started))
+                               continue;
+                       i40iw_cm_disconn(iwqp);
+                       break;
+               case I40IW_AE_TERMINATE_SENT:
+                       i40iw_terminate_send_fin(qp);
+                       break;
+               case I40IW_AE_LLP_TERMINATE_RECEIVED:
+                       i40iw_terminate_received(qp, info);
+                       break;
+               case I40IW_AE_CQ_OPERATION_ERROR:
+                       i40iw_pr_err("Processing an iWARP related AE for CQ misc = 0x%04X\n",
+                                    info->ae_id);
+                       cq = (struct i40iw_sc_cq *)(unsigned long)info->compl_ctx;
+                       iwcq = (struct i40iw_cq *)cq->back_cq;
+
+                       if (iwcq->ibcq.event_handler) {
+                               struct ib_event ibevent;
+
+                               ibevent.device = iwcq->ibcq.device;
+                               ibevent.event = IB_EVENT_CQ_ERR;
+                               ibevent.element.cq = &iwcq->ibcq;
+                               iwcq->ibcq.event_handler(&ibevent, iwcq->ibcq.cq_context);
+                       }
+                       break;
+               case I40IW_AE_PRIV_OPERATION_DENIED:
+               case I40IW_AE_STAG_ZERO_INVALID:
+               case I40IW_AE_IB_RREQ_AND_Q1_FULL:
+               case I40IW_AE_DDP_UBE_INVALID_DDP_VERSION:
+               case I40IW_AE_DDP_UBE_INVALID_MO:
+               case I40IW_AE_DDP_UBE_INVALID_QN:
+               case I40IW_AE_DDP_NO_L_BIT:
+               case I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
+               case I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
+               case I40IW_AE_ROE_INVALID_RDMA_READ_REQUEST:
+               case I40IW_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
+               case I40IW_AE_INVALID_ARP_ENTRY:
+               case I40IW_AE_INVALID_TCP_OPTION_RCVD:
+               case I40IW_AE_STALE_ARP_ENTRY:
+               case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR:
+               case I40IW_AE_LLP_SEGMENT_TOO_SMALL:
+               case I40IW_AE_LLP_SYN_RECEIVED:
+               case I40IW_AE_LLP_TOO_MANY_RETRIES:
+               case I40IW_AE_LLP_DOUBT_REACHABILITY:
+               case I40IW_AE_LCE_QP_CATASTROPHIC:
+               case I40IW_AE_LCE_FUNCTION_CATASTROPHIC:
+               case I40IW_AE_LCE_CQ_CATASTROPHIC:
+               case I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG:
+               case I40IW_AE_UDA_XMIT_IPADDR_MISMATCH:
+               case I40IW_AE_QP_SUSPEND_COMPLETE:
+                       ctx_info->err_rq_idx_valid = false;
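+                       /* fall through */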
+               default:
+                       if (!info->sq && ctx_info->err_rq_idx_valid) {
+                               ctx_info->err_rq_idx = info->wqe_idx;
+                               ctx_info->tcp_info_valid = false;
+                               ctx_info->iwarp_info_valid = false;
+                               ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
+                                                                    iwqp->host_ctx.va,
+                                                                    ctx_info);
+                       }
+                       i40iw_terminate_connection(qp, info);
+                       break;
+               }
+       } while (1);
+
+       if (aeqcnt)
+               dev->aeq_ops->repost_aeq_entries(dev, aeqcnt);
+}
+
+/**
+ * i40iw_manage_apbvt - add or delete tcp port
+ * @iwdev: iwarp device
+ * @accel_local_port: port for apbvt
+ * @add_port: add or delete port
+ */
+int i40iw_manage_apbvt(struct i40iw_device *iwdev, u16 accel_local_port, bool add_port)
+{
+       struct i40iw_apbvt_info *info;
+       enum i40iw_status_code status;
+       struct i40iw_cqp_request *cqp_request;
+       struct cqp_commands_info *cqp_info;
+
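+       /* wait for the cqp completion only when adding a port */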
+       cqp_request = i40iw_get_cqp_request(&iwdev->cqp, add_port);
+       if (!cqp_request)
+               return -ENOMEM;
+
+       cqp_info = &cqp_request->info;
+       info = &cqp_info->in.u.manage_apbvt_entry.info;
+
+       memset(info, 0, sizeof(*info));
+       info->add = add_port;
+       info->port = cpu_to_le16(accel_local_port);
+
+       cqp_info->cqp_cmd = OP_MANAGE_APBVT_ENTRY;
+       cqp_info->post_sq = 1;
+       cqp_info->in.u.manage_apbvt_entry.cqp = &iwdev->cqp.sc_cqp;
+       cqp_info->in.u.manage_apbvt_entry.scratch = (uintptr_t)cqp_request;
+       status = i40iw_handle_cqp_op(iwdev, cqp_request);
+       if (status)
+               i40iw_pr_err("CQP-OP Manage APBVT entry fail");
+       return status;
+}
+
+/**
+ * i40iw_manage_arp_cache - manage hw arp cache
+ * @iwdev: iwarp device
+ * @mac_addr: mac address ptr
+ * @ip_addr: ip addr for arp cache
+ * @ipv4: flag indicating IPv4 address family
+ * @action: add, delete or modify
+ */
+void i40iw_manage_arp_cache(struct i40iw_device *iwdev,
+                           unsigned char *mac_addr,
+                           __be32 *ip_addr,
+                           bool ipv4,
+                           u32 action)
+{
+       struct i40iw_add_arp_cache_entry_info *info;
+       struct i40iw_cqp_request *cqp_request;
+       struct cqp_commands_info *cqp_info;
+       int arp_index;
+
+       arp_index = i40iw_arp_table(iwdev, ip_addr, ipv4, mac_addr, action);
+       if (arp_index == -1)
+               return;
+       cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
+       if (!cqp_request)
+               return;
+
+       cqp_info = &cqp_request->info;
+       if (action == I40IW_ARP_ADD) {
+               cqp_info->cqp_cmd = OP_ADD_ARP_CACHE_ENTRY;
+               info = &cqp_info->in.u.add_arp_cache_entry.info;
+               memset(info, 0, sizeof(*info));
+               info->arp_index = cpu_to_le32(arp_index);
+               info->permanent = true;
+               ether_addr_copy(info->mac_addr, mac_addr);
+               cqp_info->in.u.add_arp_cache_entry.scratch = (uintptr_t)cqp_request;
+               cqp_info->in.u.add_arp_cache_entry.cqp = &iwdev->cqp.sc_cqp;
+       } else {
+               cqp_info->cqp_cmd = OP_DELETE_ARP_CACHE_ENTRY;
+               cqp_info->in.u.del_arp_cache_entry.scratch = (uintptr_t)cqp_request;
+               cqp_info->in.u.del_arp_cache_entry.cqp = &iwdev->cqp.sc_cqp;
+               cqp_info->in.u.del_arp_cache_entry.arp_index = arp_index;
+       }
+
+       cqp_info->post_sq = 1;
+       if (i40iw_handle_cqp_op(iwdev, cqp_request))
+               i40iw_pr_err("CQP-OP Add/Del Arp Cache entry fail");
+}
+
+/**
+ * i40iw_send_syn_cqp_callback - do syn/ack after qhash
+ * @cqp_request: qhash cqp completion
+ * @send_ack: flag send ack
+ */
+static void i40iw_send_syn_cqp_callback(struct i40iw_cqp_request *cqp_request, u32 send_ack)
+{
+       i40iw_send_syn(cqp_request->param, send_ack);
+}
+
+/**
+ * i40iw_manage_qhash - add or modify qhash
+ * @iwdev: iwarp device
+ * @cminfo: cm info for qhash
+ * @etype: type (syn or quad)
+ * @mtype: type of qhash
+ * @cmnode: cmnode associated with connection
+ * @wait: wait for completion
+ */
+enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
+                                         struct i40iw_cm_info *cminfo,
+                                         enum i40iw_quad_entry_type etype,
+                                         enum i40iw_quad_hash_manage_type mtype,
+                                         void *cmnode,
+                                         bool wait)
+{
+       struct i40iw_qhash_table_info *info;
+       struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+       enum i40iw_status_code status;
+       struct i40iw_cqp *iwcqp = &iwdev->cqp;
+       struct i40iw_cqp_request *cqp_request;
+       struct cqp_commands_info *cqp_info;
+
+       cqp_request = i40iw_get_cqp_request(iwcqp, wait);
+       if (!cqp_request)
+               return I40IW_ERR_NO_MEMORY;
+       cqp_info = &cqp_request->info;
+       info = &cqp_info->in.u.manage_qhash_table_entry.info;
+       memset(info, 0, sizeof(*info));
+
+       info->manage = mtype;
+       info->entry_type = etype;
+       if (cminfo->vlan_id != 0xFFFF) {
+               info->vlan_valid = true;
+               info->vlan_id = cpu_to_le16(cminfo->vlan_id);
+       } else {
+               info->vlan_valid = false;
+       }
+
+       info->ipv4_valid = cminfo->ipv4;
+       ether_addr_copy(info->mac_addr, iwdev->netdev->dev_addr);
+       info->qp_num = cpu_to_le32(dev->ilq->qp_id);
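+       /* the hash entry is keyed from the receive side, so local addr/port go in the dest fields */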
+       info->dest_port = cpu_to_le16(cminfo->loc_port);
+       info->dest_ip[0] = cpu_to_le32(cminfo->loc_addr[0]);
+       info->dest_ip[1] = cpu_to_le32(cminfo->loc_addr[1]);
+       info->dest_ip[2] = cpu_to_le32(cminfo->loc_addr[2]);
+       info->dest_ip[3] = cpu_to_le32(cminfo->loc_addr[3]);
+       if (etype == I40IW_QHASH_TYPE_TCP_ESTABLISHED) {
+               info->src_port = cpu_to_le16(cminfo->rem_port);
+               info->src_ip[0] = cpu_to_le32(cminfo->rem_addr[0]);
+               info->src_ip[1] = cpu_to_le32(cminfo->rem_addr[1]);
+               info->src_ip[2] = cpu_to_le32(cminfo->rem_addr[2]);
+               info->src_ip[3] = cpu_to_le32(cminfo->rem_addr[3]);
+       }
+       if (cmnode) {
+               cqp_request->callback_fcn = i40iw_send_syn_cqp_callback;
+               cqp_request->param = (void *)cmnode;
+       }
+
+       if (info->ipv4_valid)
+               i40iw_debug(dev, I40IW_DEBUG_CM,
+                           "%s:%s IP=%pI4, port=%d, mac=%pM, vlan_id=%d\n",
+                           __func__, (!mtype) ? "DELETE" : "ADD",
+                           info->dest_ip,
+                           info->dest_port, info->mac_addr, cminfo->vlan_id);
+       else
+               i40iw_debug(dev, I40IW_DEBUG_CM,
+                           "%s:%s IP=%pI6, port=%d, mac=%pM, vlan_id=%d\n",
+                           __func__, (!mtype) ? "DELETE" : "ADD",
+                           info->dest_ip,
+                           info->dest_port, info->mac_addr, cminfo->vlan_id);
+       cqp_info->in.u.manage_qhash_table_entry.cqp = &iwdev->cqp.sc_cqp;
+       cqp_info->in.u.manage_qhash_table_entry.scratch = (uintptr_t)cqp_request;
+       cqp_info->cqp_cmd = OP_MANAGE_QHASH_TABLE_ENTRY;
+       cqp_info->post_sq = 1;
+       status = i40iw_handle_cqp_op(iwdev, cqp_request);
+       if (status)
+               i40iw_pr_err("CQP-OP Manage Qhash Entry fail");
+       return status;
+}
+
+/**
+ * i40iw_hw_flush_wqes - flush qp's wqes
+ * @iwdev: iwarp device
+ * @qp: hardware control qp
+ * @info: info for flush
+ * @wait: flag wait for completion
+ */
+enum i40iw_status_code i40iw_hw_flush_wqes(struct i40iw_device *iwdev,
+                                          struct i40iw_sc_qp *qp,
+                                          struct i40iw_qp_flush_info *info,
+                                          bool wait)
+{
+       enum i40iw_status_code status;
+       struct i40iw_qp_flush_info *hw_info;
+       struct i40iw_cqp_request *cqp_request;
+       struct cqp_commands_info *cqp_info;
+
+       cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
+       if (!cqp_request)
+               return I40IW_ERR_NO_MEMORY;
+
+       cqp_info = &cqp_request->info;
+       hw_info = &cqp_request->info.in.u.qp_flush_wqes.info;
+       memcpy(hw_info, info, sizeof(*hw_info));
+
+       cqp_info->cqp_cmd = OP_QP_FLUSH_WQES;
+       cqp_info->post_sq = 1;
+       cqp_info->in.u.qp_flush_wqes.qp = qp;
+       cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)cqp_request;
+       status = i40iw_handle_cqp_op(iwdev, cqp_request);
+       if (status)
+               i40iw_pr_err("CQP-OP Flush WQE's fail");
+       return status;
+}
+
+/**
+ * i40iw_hw_manage_vf_pble_bp - manage vf pbles
+ * @iwdev: iwarp device
+ * @info: info for managing pble
+ * @wait: flag wait for completion
+ */
+enum i40iw_status_code i40iw_hw_manage_vf_pble_bp(struct i40iw_device *iwdev,
+                                                 struct i40iw_manage_vf_pble_info *info,
+                                                 bool wait)
+{
+       enum i40iw_status_code status;
+       struct i40iw_manage_vf_pble_info *hw_info;
+       struct i40iw_cqp_request *cqp_request;
+       struct cqp_commands_info *cqp_info;
+
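+       /* completions cannot be waited on until the ccq is created */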
+       if ((iwdev->init_state < CCQ_CREATED) && wait)
+               wait = false;
+
+       cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
+       if (!cqp_request)
+               return I40IW_ERR_NO_MEMORY;
+
+       cqp_info = &cqp_request->info;
+       hw_info = &cqp_request->info.in.u.manage_vf_pble_bp.info;
+       memcpy(hw_info, info, sizeof(*hw_info));
+
+       cqp_info->cqp_cmd = OP_MANAGE_VF_PBLE_BP;
+       cqp_info->post_sq = 1;
+       cqp_info->in.u.manage_vf_pble_bp.cqp = &iwdev->cqp.sc_cqp;
+       cqp_info->in.u.manage_vf_pble_bp.scratch = (uintptr_t)cqp_request;
+       status = i40iw_handle_cqp_op(iwdev, cqp_request);
+       if (status)
+               i40iw_pr_err("CQP-OP Manage VF pble_bp fail");
+       return status;
+}
+
+/**
+ * i40iw_get_ib_wc - return IB work completion status for an iwarp flush code
+ * @opcode: iwarp flush code
+ */
+static enum ib_wc_status i40iw_get_ib_wc(enum i40iw_flush_opcode opcode)
+{
+       switch (opcode) {
+       case FLUSH_PROT_ERR:
+               return IB_WC_LOC_PROT_ERR;
+       case FLUSH_REM_ACCESS_ERR:
+               return IB_WC_REM_ACCESS_ERR;
+       case FLUSH_LOC_QP_OP_ERR:
+               return IB_WC_LOC_QP_OP_ERR;
+       case FLUSH_REM_OP_ERR:
+               return IB_WC_REM_OP_ERR;
+       case FLUSH_LOC_LEN_ERR:
+               return IB_WC_LOC_LEN_ERR;
+       case FLUSH_GENERAL_ERR:
+               return IB_WC_GENERAL_ERR;
+       case FLUSH_FATAL_ERR:
+       default:
+               return IB_WC_FATAL_ERR;
+       }
+}
+
+/**
+ * i40iw_set_flush_info - set flush info
+ * @pinfo: qp flush info to be filled in
+ * @min: minor err
+ * @maj: major err
+ * @opcode: flush error code
+ */
+static void i40iw_set_flush_info(struct i40iw_qp_flush_info *pinfo,
+                                u16 *min,
+                                u16 *maj,
+                                enum i40iw_flush_opcode opcode)
+{
+       *min = (u16)i40iw_get_ib_wc(opcode);
+       *maj = CQE_MAJOR_DRV;
+       pinfo->userflushcode = true;
+}
+
+/**
+ * i40iw_flush_wqes - flush wqes for qp
+ * @iwdev: iwarp device
+ * @iwqp: qp to flush wqes
+ */
+void i40iw_flush_wqes(struct i40iw_device *iwdev, struct i40iw_qp *iwqp)
+{
+       struct i40iw_qp_flush_info info;
+       struct i40iw_qp_flush_info *pinfo = &info;
+
+       struct i40iw_sc_qp *qp = &iwqp->sc_qp;
+
+       memset(pinfo, 0, sizeof(*pinfo));
+       info.sq = true;
+       info.rq = true;
+       if (qp->term_flags) {
+               i40iw_set_flush_info(pinfo, &pinfo->sq_minor_code,
+                                    &pinfo->sq_major_code, qp->flush_code);
+               i40iw_set_flush_info(pinfo, &pinfo->rq_minor_code,
+                                    &pinfo->rq_major_code, qp->flush_code);
+       }
+       (void)i40iw_hw_flush_wqes(iwdev, &iwqp->sc_qp, &info, true);
+}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
new file mode 100644 (file)
index 0000000..90e5af2
--- /dev/null
@@ -0,0 +1,1910 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses.  You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+*   Redistribution and use in source and binary forms, with or
+*   without modification, are permitted provided that the following
+*   conditions are met:
+*
+*    - Redistributions of source code must retain the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer.
+*
+*    - Redistributions in binary form must reproduce the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer in the documentation and/or other materials
+*      provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/if_vlan.h>
+#include <net/addrconf.h>
+
+#include "i40iw.h"
+#include "i40iw_register.h"
+#include <net/netevent.h>
+#define CLIENT_IW_INTERFACE_VERSION_MAJOR 0
+#define CLIENT_IW_INTERFACE_VERSION_MINOR 01
+#define CLIENT_IW_INTERFACE_VERSION_BUILD 00
+
+#define DRV_VERSION_MAJOR 0
+#define DRV_VERSION_MINOR 5
+#define DRV_VERSION_BUILD 123
+#define DRV_VERSION    __stringify(DRV_VERSION_MAJOR) "."              \
+       __stringify(DRV_VERSION_MINOR) "." __stringify(DRV_VERSION_BUILD)
+
+static int push_mode;
+module_param(push_mode, int, 0644);
+MODULE_PARM_DESC(push_mode, "Low latency mode: 0=disabled (default), 1=enabled");
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "debug flags: 0=disabled (default), 0x7fffffff=all");
+
+static int resource_profile;
+module_param(resource_profile, int, 0644);
+MODULE_PARM_DESC(resource_profile,
+                "Resource Profile: 0=no VF RDMA support (default), 1=Weighted VF, 2=Even Distribution");
+
+static int max_rdma_vfs = 32;
+module_param(max_rdma_vfs, int, 0644);
+MODULE_PARM_DESC(max_rdma_vfs, "Maximum VF count: 0-32 (default 32)");
+static int mpa_version = 2;
+module_param(mpa_version, int, 0644);
+MODULE_PARM_DESC(mpa_version, "MPA version to be used in MPA Req/Resp 1 or 2");
+
+MODULE_AUTHOR("Intel Corporation, <e1000-rdma@lists.sourceforge.net>");
+MODULE_DESCRIPTION("Intel(R) Ethernet Connection X722 iWARP RDMA Driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static struct i40e_client i40iw_client;
+static char i40iw_client_name[I40E_CLIENT_STR_LENGTH] = "i40iw";
+
+static LIST_HEAD(i40iw_handlers);
+static DEFINE_SPINLOCK(i40iw_handler_lock);
+
+static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev,
+                                                 u32 vf_id, u8 *msg, u16 len);
+
+static struct notifier_block i40iw_inetaddr_notifier = {
+       .notifier_call = i40iw_inetaddr_event
+};
+
+static struct notifier_block i40iw_inetaddr6_notifier = {
+       .notifier_call = i40iw_inet6addr_event
+};
+
+static struct notifier_block i40iw_net_notifier = {
+       .notifier_call = i40iw_net_event
+};
+
+static int i40iw_notifiers_registered;
+
+/**
+ * i40iw_find_i40e_handler - find a handler given a client info
+ * @ldev: pointer to a client info
+ */
+static struct i40iw_handler *i40iw_find_i40e_handler(struct i40e_info *ldev)
+{
+       struct i40iw_handler *hdl;
+       unsigned long flags;
+
+       spin_lock_irqsave(&i40iw_handler_lock, flags);
+       list_for_each_entry(hdl, &i40iw_handlers, list) {
+               if (hdl->ldev.netdev == ldev->netdev) {
+                       spin_unlock_irqrestore(&i40iw_handler_lock, flags);
+                       return hdl;
+               }
+       }
+       spin_unlock_irqrestore(&i40iw_handler_lock, flags);
+       return NULL;
+}
+
+/**
+ * i40iw_find_netdev - find a handler given a netdev
+ * @netdev: pointer to net_device
+ */
+struct i40iw_handler *i40iw_find_netdev(struct net_device *netdev)
+{
+       struct i40iw_handler *hdl;
+       unsigned long flags;
+
+       spin_lock_irqsave(&i40iw_handler_lock, flags);
+       list_for_each_entry(hdl, &i40iw_handlers, list) {
+               if (hdl->ldev.netdev == netdev) {
+                       spin_unlock_irqrestore(&i40iw_handler_lock, flags);
+                       return hdl;
+               }
+       }
+       spin_unlock_irqrestore(&i40iw_handler_lock, flags);
+       return NULL;
+}
+
+/**
+ * i40iw_add_handler - add a handler to the list
+ * @hdl: handler to be added to the handler list
+ */
+static void i40iw_add_handler(struct i40iw_handler *hdl)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&i40iw_handler_lock, flags);
+       list_add(&hdl->list, &i40iw_handlers);
+       spin_unlock_irqrestore(&i40iw_handler_lock, flags);
+}
+
+/**
+ * i40iw_del_handler - delete a handler from the list
+ * @hdl: handler to be deleted from the handler list
+ */
+static int i40iw_del_handler(struct i40iw_handler *hdl)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&i40iw_handler_lock, flags);
+       list_del(&hdl->list);
+       spin_unlock_irqrestore(&i40iw_handler_lock, flags);
+       return 0;
+}
+
+/**
+ * i40iw_enable_intr - set up device interrupts
+ * @dev: hardware control device structure
+ * @msix_id: id of the interrupt to be enabled
+ */
+static void i40iw_enable_intr(struct i40iw_sc_dev *dev, u32 msix_id)
+{
+       u32 val;
+
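+       /* enable the interrupt, clear PBA and use ITR index 3 (no ITR) */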
+       val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+               I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+               (3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+       if (dev->is_pf)
+               i40iw_wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_id - 1), val);
+       else
+               i40iw_wr32(dev->hw, I40E_VFINT_DYN_CTLN1(msix_id - 1), val);
+}
+
+/**
+ * i40iw_dpc - tasklet for aeq and ceq 0
+ * @data: iwarp device
+ */
+static void i40iw_dpc(unsigned long data)
+{
+       struct i40iw_device *iwdev = (struct i40iw_device *)data;
+
+       if (iwdev->msix_shared)
+               i40iw_process_ceq(iwdev, iwdev->ceqlist);
+       i40iw_process_aeq(iwdev);
+       i40iw_enable_intr(&iwdev->sc_dev, iwdev->iw_msixtbl[0].idx);
+}
+
+/**
+ * i40iw_ceq_dpc - dpc handler for CEQ
+ * @data: data points to CEQ
+ */
+static void i40iw_ceq_dpc(unsigned long data)
+{
+       struct i40iw_ceq *iwceq = (struct i40iw_ceq *)data;
+       struct i40iw_device *iwdev = iwceq->iwdev;
+
+       i40iw_process_ceq(iwdev, iwceq);
+       i40iw_enable_intr(&iwdev->sc_dev, iwceq->msix_idx);
+}
+
+/**
+ * i40iw_irq_handler - interrupt handler for aeq and ceq0
+ * @irq: Interrupt request number
+ * @data: iwarp device
+ */
+static irqreturn_t i40iw_irq_handler(int irq, void *data)
+{
+       struct i40iw_device *iwdev = (struct i40iw_device *)data;
+
+       tasklet_schedule(&iwdev->dpc_tasklet);
+       return IRQ_HANDLED;
+}
+
+/**
+ * i40iw_destroy_cqp  - destroy control qp
+ * @iwdev: iwarp device
+ * @free_hwcqp: true if the hardware cqp should be destroyed
+ *
+ * Issue destroy cqp request and
+ * free the resources associated with the cqp
+ */
+static void i40iw_destroy_cqp(struct i40iw_device *iwdev, bool free_hwcqp)
+{
+       enum i40iw_status_code status = 0;
+       struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+       struct i40iw_cqp *cqp = &iwdev->cqp;
+
+       if (free_hwcqp && dev->cqp_ops->cqp_destroy)
+               status = dev->cqp_ops->cqp_destroy(dev->cqp);
+       if (status)
+               i40iw_pr_err("destroy cqp failed");
+
+       i40iw_free_dma_mem(dev->hw, &cqp->sq);
+       kfree(cqp->scratch_array);
+       iwdev->cqp.scratch_array = NULL;
+
+       kfree(cqp->cqp_requests);
+       cqp->cqp_requests = NULL;
+}
+
+/**
+ * i40iw_disable_irq - disable device interrupts
+ * @dev: hardware control device structure
+ * @msix_vec: msix vector to disable irq
+ * @dev_id: parameter to pass to free_irq (used during irq setup)
+ *
+ * The function is called when destroying aeq/ceq
+ */
+static void i40iw_disable_irq(struct i40iw_sc_dev *dev,
+                             struct i40iw_msix_vector *msix_vec,
+                             void *dev_id)
+{
+       if (dev->is_pf)
+               i40iw_wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_vec->idx - 1), 0);
+       else
+               i40iw_wr32(dev->hw, I40E_VFINT_DYN_CTLN1(msix_vec->idx - 1), 0);
+       synchronize_irq(msix_vec->irq);
+       free_irq(msix_vec->irq, dev_id);
+}
+
+/**
+ * i40iw_destroy_aeq - destroy aeq
+ * @iwdev: iwarp device
+ * @reset: true if called before reset
+ *
+ * Issue a destroy aeq request and
+ * free the resources associated with the aeq
+ * The function is called during driver unload
+ */
+static void i40iw_destroy_aeq(struct i40iw_device *iwdev, bool reset)
+{
+       enum i40iw_status_code status = I40IW_ERR_NOT_READY;
+       struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+       struct i40iw_aeq *aeq = &iwdev->aeq;
+
+       if (!iwdev->msix_shared)
+               i40iw_disable_irq(dev, iwdev->iw_msixtbl, (void *)iwdev);
+       if (reset)
+               goto exit;
+
+       if (!dev->aeq_ops->aeq_destroy(&aeq->sc_aeq, 0, 1))
+               status = dev->aeq_ops->aeq_destroy_done(&aeq->sc_aeq);
+       if (status)
+               i40iw_pr_err("destroy aeq failed %d\n", status);
+
+exit:
+       i40iw_free_dma_mem(dev->hw, &aeq->mem);
+}
+
+/**
+ * i40iw_destroy_ceq - destroy ceq
+ * @iwdev: iwarp device
+ * @iwceq: ceq to be destroyed
+ * @reset: true if called before reset
+ *
+ * Issue a destroy ceq request and
+ * free the resources associated with the ceq
+ */
+static void i40iw_destroy_ceq(struct i40iw_device *iwdev,
+                             struct i40iw_ceq *iwceq,
+                             bool reset)
+{
+       enum i40iw_status_code status;
+       struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+
+       if (reset)
+               goto exit;
+
+       status = dev->ceq_ops->ceq_destroy(&iwceq->sc_ceq, 0, 1);
+       if (status) {
+               i40iw_pr_err("ceq destroy command failed %d\n", status);
+               goto exit;
+       }
+
+       status = dev->ceq_ops->cceq_destroy_done(&iwceq->sc_ceq);
+       if (status)
+               i40iw_pr_err("ceq destroy completion failed %d\n", status);
+exit:
+       i40iw_free_dma_mem(dev->hw, &iwceq->mem);
+}
+
+/**
+ * i40iw_dele_ceqs - destroy all ceqs
+ * @iwdev: iwarp device
+ * @reset: true if called before reset
+ *
+ * Go through all of the device ceqs and for each ceq
+ * disable the ceq interrupt and destroy the ceq
+ */
+static void i40iw_dele_ceqs(struct i40iw_device *iwdev, bool reset)
+{
+       u32 i = 0;
+       struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+       struct i40iw_ceq *iwceq = iwdev->ceqlist;
+       struct i40iw_msix_vector *msix_vec = iwdev->iw_msixtbl;
+
+       if (iwdev->msix_shared) {
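+               /* the first vector is shared with the aeq and was requested with iwdev as its dev_id */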
+               i40iw_disable_irq(dev, msix_vec, (void *)iwdev);
+               i40iw_destroy_ceq(iwdev, iwceq, reset);
+               iwceq++;
+               i++;
+       }
+
+       for (msix_vec++; i < iwdev->ceqs_count; i++, msix_vec++, iwceq++) {
+               i40iw_disable_irq(dev, msix_vec, (void *)iwceq);
+               i40iw_destroy_ceq(iwdev, iwceq, reset);
+       }
+}
+
+/**
+ * i40iw_destroy_ccq - destroy control cq
+ * @iwdev: iwarp device
+ * @reset: true if called before reset
+ *
+ * Issue destroy ccq request and
+ * free the resources associated with the ccq
+ */
+static void i40iw_destroy_ccq(struct i40iw_device *iwdev, bool reset)
+{
+       struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+       struct i40iw_ccq *ccq = &iwdev->ccq;
+       enum i40iw_status_code status = 0;
+
+       if (!reset)
+               status = dev->ccq_ops->ccq_destroy(dev->ccq, 0, true);
+       if (status)
+               i40iw_pr_err("ccq destroy failed %d\n", status);
+       i40iw_free_dma_mem(dev->hw, &ccq->mem_cq);
+}
+
+/* types of hmc objects */
+static enum i40iw_hmc_rsrc_type iw_hmc_obj_types[] = {
+       I40IW_HMC_IW_QP,
+       I40IW_HMC_IW_CQ,
+       I40IW_HMC_IW_HTE,
+       I40IW_HMC_IW_ARP,
+       I40IW_HMC_IW_APBVT_ENTRY,
+       I40IW_HMC_IW_MR,
+       I40IW_HMC_IW_XF,
+       I40IW_HMC_IW_XFFL,
+       I40IW_HMC_IW_Q1,
+       I40IW_HMC_IW_Q1FL,
+       I40IW_HMC_IW_TIMER,
+};
+
+/**
+ * i40iw_close_hmc_objects_type - delete hmc objects of a given type
+ * @dev: hardware control device structure
+ * @obj_type: the hmc object type to be deleted
+ * @hmc_info: host memory cache info with the object tables
+ * @is_pf: true if the function is PF otherwise false
+ * @reset: true if called before reset
+ */
+static void i40iw_close_hmc_objects_type(struct i40iw_sc_dev *dev,
+                                        enum i40iw_hmc_rsrc_type obj_type,
+                                        struct i40iw_hmc_info *hmc_info,
+                                        bool is_pf,
+                                        bool reset)
+{
+       struct i40iw_hmc_del_obj_info info;
+
+       memset(&info, 0, sizeof(info));
+       info.hmc_info = hmc_info;
+       info.rsrc_type = obj_type;
+       info.count = hmc_info->hmc_obj[obj_type].cnt;
+       info.is_pf = is_pf;
+       if (dev->hmc_ops->del_hmc_object(dev, &info, reset))
+               i40iw_pr_err("del obj of type %d failed\n", obj_type);
+}
+
+/**
+ * i40iw_del_hmc_objects - remove all device hmc objects
+ * @dev: hardware control device structure
+ * @hmc_info: hmc_info to free
+ * @is_pf: true if hmc_info belongs to PF, not vf nor allocated
+ *        by PF on behalf of VF
+ * @reset: true if called before reset
+ */
+static void i40iw_del_hmc_objects(struct i40iw_sc_dev *dev,
+                                 struct i40iw_hmc_info *hmc_info,
+                                 bool is_pf,
+                                 bool reset)
+{
+       unsigned int i;
+
+       for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++)
+               i40iw_close_hmc_objects_type(dev, iw_hmc_obj_types[i], hmc_info, is_pf, reset);
+}
+
+/**
+ * i40iw_ceq_handler - interrupt handler for ceq
+ * @irq: interrupt request number
+ * @data: ceq pointer
+ */
+static irqreturn_t i40iw_ceq_handler(int irq, void *data)
+{
+       struct i40iw_ceq *iwceq = (struct i40iw_ceq *)data;
+
+       if (iwceq->irq != irq)
+               i40iw_pr_err("expected irq = %d received irq = %d\n", iwceq->irq, irq);
+       tasklet_schedule(&iwceq->dpc_tasklet);
+       return IRQ_HANDLED;
+}
+
+/**
+ * i40iw_create_hmc_obj_type - create hmc object of a given type
+ * @dev: hardware control device structure
+ * @info: information for the hmc object to create
+ */
+static enum i40iw_status_code i40iw_create_hmc_obj_type(struct i40iw_sc_dev *dev,
+                                                       struct i40iw_hmc_create_obj_info *info)
+{
+       return dev->hmc_ops->create_hmc_object(dev, info);
+}
+
+/**
+ * i40iw_create_hmc_objs - create all hmc objects for the device
+ * @iwdev: iwarp device
+ * @is_pf: true if the function is PF otherwise false
+ *
+ * Create the device hmc objects and allocate hmc pages
+ * Return 0 if successful, otherwise clean up and return error
+ */
+static enum i40iw_status_code i40iw_create_hmc_objs(struct i40iw_device *iwdev,
+                                                   bool is_pf)
+{
+       struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+       struct i40iw_hmc_create_obj_info info;
+       enum i40iw_status_code status;
+       int i;
+
+       memset(&info, 0, sizeof(info));
+       info.hmc_info = dev->hmc_info;
+       info.is_pf = is_pf;
+       info.entry_type = iwdev->sd_type;
+       for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
+               info.rsrc_type = iw_hmc_obj_types[i];
+               info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;
+               status = i40iw_create_hmc_obj_type(dev, &info);
+               if (status) {
+                       i40iw_pr_err("create obj type %d status = %d\n",
+                                    iw_hmc_obj_types[i], status);
+                       break;
+               }
+       }
+       if (!status)
+               return (dev->cqp_misc_ops->static_hmc_pages_allocated(dev->cqp, 0,
+                                                                     dev->hmc_fn_id,
+                                                                     true, true));
+
+       while (i) {
+               i--;
+               /* destroy the hmc objects of a given type */
+               i40iw_close_hmc_objects_type(dev,
+                                            iw_hmc_obj_types[i],
+                                            dev->hmc_info,
+                                            is_pf,
+                                            false);
+       }
+       return status;
+}
+
+/**
+ * i40iw_obj_aligned_mem - get aligned memory from device allocated memory
+ * @iwdev: iwarp device
+ * @memptr: points to the memory addresses
+ * @size: size of memory needed
+ * @mask: mask for the aligned memory
+ *
+ * Get aligned memory of the requested size and
+ * update the memptr to point to the new aligned memory
+ * Return 0 if successful, otherwise return no memory error
+ */
+enum i40iw_status_code i40iw_obj_aligned_mem(struct i40iw_device *iwdev,
+                                            struct i40iw_dma_mem *memptr,
+                                            u32 size,
+                                            u32 mask)
+{
+       unsigned long va, newva;
+       unsigned long extra;
+
+       va = (unsigned long)iwdev->obj_next.va;
+       newva = va;
+       if (mask)
+               newva = ALIGN(va, (mask + 1));
+       extra = newva - va;
+       memptr->va = (u8 *)va + extra;
+       memptr->pa = iwdev->obj_next.pa + extra;
+       memptr->size = size;
+       if ((memptr->va + size) > (iwdev->obj_mem.va + iwdev->obj_mem.size))
+               return I40IW_ERR_NO_MEMORY;
+
+       iwdev->obj_next.va = memptr->va + size;
+       iwdev->obj_next.pa = memptr->pa + size;
+       return 0;
+}
+
+/**
+ * i40iw_create_cqp - create control qp
+ * @iwdev: iwarp device
+ *
+ * Return 0, if the cqp and all the resources associated with it
+ * are successfully created, otherwise return error
+ */
+static enum i40iw_status_code i40iw_create_cqp(struct i40iw_device *iwdev)
+{
+       enum i40iw_status_code status;
+       u32 sqsize = I40IW_CQP_SW_SQSIZE_2048;
+       struct i40iw_dma_mem mem;
+       struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+       struct i40iw_cqp_init_info cqp_init_info;
+       struct i40iw_cqp *cqp = &iwdev->cqp;
+       u16 maj_err, min_err;
+       int i;
+
+       cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL);
+       if (!cqp->cqp_requests)
+               return I40IW_ERR_NO_MEMORY;
+       cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL);
+       if (!cqp->scratch_array) {
+               kfree(cqp->cqp_requests);
+               return I40IW_ERR_NO_MEMORY;
+       }
+       dev->cqp = &cqp->sc_cqp;
+       dev->cqp->dev = dev;
+       memset(&cqp_init_info, 0, sizeof(cqp_init_info));
+       status = i40iw_allocate_dma_mem(dev->hw, &cqp->sq,
+                                       (sizeof(struct i40iw_cqp_sq_wqe) * sqsize),
+                                       I40IW_CQP_ALIGNMENT);
+       if (status)
+               goto exit;
+       status = i40iw_obj_aligned_mem(iwdev, &mem, sizeof(struct i40iw_cqp_ctx),
+                                      I40IW_HOST_CTX_ALIGNMENT_MASK);
+       if (status)
+               goto exit;
+       dev->cqp->host_ctx_pa = mem.pa;
+       dev->cqp->host_ctx = mem.va;
+       /* populate the cqp init info */
+       cqp_init_info.dev = dev;
+       cqp_init_info.sq_size = sqsize;
+       cqp_init_info.sq = cqp->sq.va;
+       cqp_init_info.sq_pa = cqp->sq.pa;
+       cqp_init_info.host_ctx_pa = mem.pa;
+       cqp_init_info.host_ctx = mem.va;
+       cqp_init_info.hmc_profile = iwdev->resource_profile;
+       cqp_init_info.enabled_vf_count = iwdev->max_rdma_vfs;
+       cqp_init_info.scratch_array = cqp->scratch_array;
+       status = dev->cqp_ops->cqp_init(dev->cqp, &cqp_init_info);
+       if (status) {
+               i40iw_pr_err("cqp init status %d\n", status);
+               goto exit;
+       }
+       status = dev->cqp_ops->cqp_create(dev->cqp, true, &maj_err, &min_err);
+       if (status) {
+               i40iw_pr_err("cqp create status %d maj_err %d min_err %d\n",
+                            status, maj_err, min_err);
+               goto exit;
+       }
+       spin_lock_init(&cqp->req_lock);
+       INIT_LIST_HEAD(&cqp->cqp_avail_reqs);
+       INIT_LIST_HEAD(&cqp->cqp_pending_reqs);
+       /* init the waitq of the cqp_requests and add them to the list */
+       for (i = 0; i < I40IW_CQP_SW_SQSIZE_2048; i++) {
+               init_waitqueue_head(&cqp->cqp_requests[i].waitq);
+               list_add_tail(&cqp->cqp_requests[i].list, &cqp->cqp_avail_reqs);
+       }
+       return 0;
+exit:
+       /* clean up the created resources */
+       i40iw_destroy_cqp(iwdev, false);
+       return status;
+}
+
+/**
+ * i40iw_create_ccq - create control cq
+ * @iwdev: iwarp device
+ *
+ * Return 0, if the ccq and the resources associated with it
+ * are successfully created, otherwise return error
+ */
+static enum i40iw_status_code i40iw_create_ccq(struct i40iw_device *iwdev)
+{
+       struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+       struct i40iw_dma_mem mem;
+       enum i40iw_status_code status;
+       struct i40iw_ccq_init_info info;
+       struct i40iw_ccq *ccq = &iwdev->ccq;
+
+       memset(&info, 0, sizeof(info));
+       dev->ccq = &ccq->sc_cq;
+       dev->ccq->dev = dev;
+       info.dev = dev;
+       ccq->shadow_area.size = sizeof(struct i40iw_cq_shadow_area);
+       ccq->mem_cq.size = sizeof(struct i40iw_cqe) * IW_CCQ_SIZE;
+       status = i40iw_allocate_dma_mem(dev->hw, &ccq->mem_cq,
+                                       ccq->mem_cq.size, I40IW_CQ0_ALIGNMENT);
+       if (status)
+               goto exit;
+       status = i40iw_obj_aligned_mem(iwdev, &mem, ccq->shadow_area.size,
+                                      I40IW_SHADOWAREA_MASK);
+       if (status)
+               goto exit;
+       ccq->sc_cq.back_cq = (void *)ccq;
+       /* populate the ccq init info */
+       info.cq_base = ccq->mem_cq.va;
+       info.cq_pa = ccq->mem_cq.pa;
+       info.num_elem = IW_CCQ_SIZE;
+       info.shadow_area = mem.va;
+       info.shadow_area_pa = mem.pa;
+       info.ceqe_mask = false;
+       info.ceq_id_valid = true;
+       info.shadow_read_threshold = 16;
+       status = dev->ccq_ops->ccq_init(dev->ccq, &info);
+       if (!status)
+               status = dev->ccq_ops->ccq_create(dev->ccq, 0, true, true);
+exit:
+       if (status)
+               i40iw_free_dma_mem(dev->hw, &ccq->mem_cq);
+       return status;
+}
+
+/**
+ * i40iw_configure_ceq_vector - set up the msix interrupt vector for ceq
+ * @iwdev: iwarp device
+ * @msix_vec: interrupt vector information
+ * @iwceq: ceq associated with the vector
+ * @ceq_id: the id number of the iwceq
+ *
+ * Allocate interrupt resources and enable irq handling
+ * Return 0 if successful, otherwise return error
+ */
+static enum i40iw_status_code i40iw_configure_ceq_vector(struct i40iw_device *iwdev,
+                                                        struct i40iw_ceq *iwceq,
+                                                        u32 ceq_id,
+                                                        struct i40iw_msix_vector *msix_vec)
+{
+       enum i40iw_status_code status;
+
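+       /* ceq 0 shares the aeq's msix vector when msix_shared is set */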
+       if (iwdev->msix_shared && !ceq_id) {
+               tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev);
+               status = request_irq(msix_vec->irq, i40iw_irq_handler, 0, "AEQCEQ", iwdev);
+       } else {
+               tasklet_init(&iwceq->dpc_tasklet, i40iw_ceq_dpc, (unsigned long)iwceq);
+               status = request_irq(msix_vec->irq, i40iw_ceq_handler, 0, "CEQ", iwceq);
+       }
+
+       if (status) {
+               i40iw_pr_err("ceq irq config fail\n");
+               return I40IW_ERR_CONFIG;
+       }
+       msix_vec->ceq_id = ceq_id;
+       msix_vec->cpu_affinity = 0;
+
+       return 0;
+}
+
+/**
+ * i40iw_create_ceq - create completion event queue
+ * @iwdev: iwarp device
+ * @iwceq: pointer to the ceq resources to be created
+ * @ceq_id: the id number of the iwceq
+ *
+ * Return 0, if the ceq and the resources associated with it
+ * are successfully created, otherwise return error
+ */
+static enum i40iw_status_code i40iw_create_ceq(struct i40iw_device *iwdev,
+                                              struct i40iw_ceq *iwceq,
+                                              u32 ceq_id)
+{
+       enum i40iw_status_code status;
+       struct i40iw_ceq_init_info info;
+       struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+       u64 scratch;
+
+       memset(&info, 0, sizeof(info));
+       iwceq->iwdev = iwdev;
+       iwceq->mem.size = sizeof(struct i40iw_ceqe) *
+               iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
+       status = i40iw_allocate_dma_mem(dev->hw, &iwceq->mem, iwceq->mem.size,
+                                       I40IW_CEQ_ALIGNMENT);
+       if (status)
+               goto exit;
+       info.ceq_id = ceq_id;
+       info.ceqe_base = iwceq->mem.va;
+       info.ceqe_pa = iwceq->mem.pa;
+
+       info.elem_cnt = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
+       iwceq->sc_ceq.ceq_id = ceq_id;
+       info.dev = dev;
+       scratch = (uintptr_t)&iwdev->cqp.sc_cqp;
+       status = dev->ceq_ops->ceq_init(&iwceq->sc_ceq, &info);
+       if (!status)
+               status = dev->ceq_ops->cceq_create(&iwceq->sc_ceq, scratch);
+
+exit:
+       if (status)
+               i40iw_free_dma_mem(dev->hw, &iwceq->mem);
+       return status;
+}
+
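+/**
+ * i40iw_request_reset - request a reset from the lan driver
+ * @iwdev: iwarp device
+ */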
+void i40iw_request_reset(struct i40iw_device *iwdev)
+{
+       struct i40e_info *ldev = iwdev->ldev;
+
+       ldev->ops->request_reset(ldev, iwdev->client, 1);
+}
+
+/**
+ * i40iw_setup_ceqs - manage the device ceq's and their interrupt resources
+ * @iwdev: iwarp device
+ * @ldev: i40e lan device
+ *
+ * Allocate a list for all device completion event queues
+ * Create the ceq's and configure their msix interrupt vectors
+ * Return 0, if at least one ceq is successfully set up, otherwise return error
+ */
+static enum i40iw_status_code i40iw_setup_ceqs(struct i40iw_device *iwdev,
+                                              struct i40e_info *ldev)
+{
+       u32 i;
+       u32 ceq_id;
+       struct i40iw_ceq *iwceq;
+       struct i40iw_msix_vector *msix_vec;
+       enum i40iw_status_code status = 0;
+       u32 num_ceqs;
+
+       if (ldev && ldev->ops && ldev->ops->setup_qvlist) {
+               status = ldev->ops->setup_qvlist(ldev, &i40iw_client,
+                                                iwdev->iw_qvlist);
+               if (status)
+                       goto exit;
+       } else {
+               status = I40IW_ERR_BAD_PTR;
+               goto exit;
+       }
+
+       num_ceqs = min(iwdev->msix_count, iwdev->sc_dev.hmc_fpm_misc.max_ceqs);
+       iwdev->ceqlist = kcalloc(num_ceqs, sizeof(*iwdev->ceqlist), GFP_KERNEL);
+       if (!iwdev->ceqlist) {
+               status = I40IW_ERR_NO_MEMORY;
+               goto exit;
+       }
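+       /* with a shared vector, ceq 0 uses msix entry 0; otherwise ceqs start at entry 1 */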
+       i = (iwdev->msix_shared) ? 0 : 1;
+       for (ceq_id = 0; i < num_ceqs; i++, ceq_id++) {
+               iwceq = &iwdev->ceqlist[ceq_id];
+               status = i40iw_create_ceq(iwdev, iwceq, ceq_id);
+               if (status) {
+                       i40iw_pr_err("create ceq status = %d\n", status);
+                       break;
+               }
+
+               msix_vec = &iwdev->iw_msixtbl[i];
+               iwceq->irq = msix_vec->irq;
+               iwceq->msix_idx = msix_vec->idx;
+               status = i40iw_configure_ceq_vector(iwdev, iwceq, ceq_id, msix_vec);
+               if (status) {
+                       i40iw_destroy_ceq(iwdev, iwceq, false);
+                       break;
+               }
+               i40iw_enable_intr(&iwdev->sc_dev, msix_vec->idx);
+               iwdev->ceqs_count++;
+       }
+
+exit:
+       if (status) {
+               if (!iwdev->ceqs_count) {
+                       kfree(iwdev->ceqlist);
+                       iwdev->ceqlist = NULL;
+               } else {
+                       status = 0;
+               }
+       }
+       return status;
+}
+
+/**
+ * i40iw_configure_aeq_vector - set up the msix vector for aeq
+ * @iwdev: iwarp device
+ *
+ * Allocate interrupt resources and enable irq handling
+ * Return 0 if successful, otherwise return error
+ */
+static enum i40iw_status_code i40iw_configure_aeq_vector(struct i40iw_device *iwdev)
+{
+       struct i40iw_msix_vector *msix_vec = iwdev->iw_msixtbl;
+       u32 ret = 0;
+
+       if (!iwdev->msix_shared) {
+               tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev);
+               ret = request_irq(msix_vec->irq, i40iw_irq_handler, 0, "i40iw", iwdev);
+       }
+       if (ret) {
+               i40iw_pr_err("aeq irq config fail\n");
+               return I40IW_ERR_CONFIG;
+       }
+
+       return 0;
+}
+
+/**
+ * i40iw_create_aeq - create async event queue
+ * @iwdev: iwarp device
+ *
+ * Return 0, if the aeq and the resources associated with it
+ * are successfully created, otherwise return error
+ */
+static enum i40iw_status_code i40iw_create_aeq(struct i40iw_device *iwdev)
+{
+       enum i40iw_status_code status;
+       struct i40iw_aeq_init_info info;
+       struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+       struct i40iw_aeq *aeq = &iwdev->aeq;
+       u64 scratch = 0;
+       u32 aeq_size;
+
+       aeq_size = 2 * iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt +
+               iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
+       memset(&info, 0, sizeof(info));
+       aeq->mem.size = sizeof(struct i40iw_sc_aeqe) * aeq_size;
+       status = i40iw_allocate_dma_mem(dev->hw, &aeq->mem, aeq->mem.size,
+                                       I40IW_AEQ_ALIGNMENT);
+       if (status)
+               goto exit;
+
+       info.aeqe_base = aeq->mem.va;
+       info.aeq_elem_pa = aeq->mem.pa;
+       info.elem_cnt = aeq_size;
+       info.dev = dev;
+       status = dev->aeq_ops->aeq_init(&aeq->sc_aeq, &info);
+       if (status)
+               goto exit;
+       status = dev->aeq_ops->aeq_create(&aeq->sc_aeq, scratch, 1);
+       if (!status)
+               status = dev->aeq_ops->aeq_create_done(&aeq->sc_aeq);
+exit:
+       if (status)
+               i40iw_free_dma_mem(dev->hw, &aeq->mem);
+       return status;
+}
+
+/**
+ * i40iw_setup_aeq - set up the device aeq
+ * @iwdev: iwarp device
+ *
+ * Create the aeq and configure its msix interrupt vector
+ * Return 0 if successful, otherwise return error
+ */
+static enum i40iw_status_code i40iw_setup_aeq(struct i40iw_device *iwdev)
+{
+       struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+       enum i40iw_status_code status;
+
+       status = i40iw_create_aeq(iwdev);
+       if (status)
+               return status;
+
+       status = i40iw_configure_aeq_vector(iwdev);
+       if (status) {
+               i40iw_destroy_aeq(iwdev, false);
+               return status;
+       }
+
+       if (!iwdev->msix_shared)
+               i40iw_enable_intr(dev, iwdev->iw_msixtbl[0].idx);
+       return 0;
+}
+
+/**
+ * i40iw_initialize_ilq - create iwarp local queue for cm
+ * @iwdev: iwarp device
+ *
+ * Return 0 if successful, otherwise return error
+ */
+static enum i40iw_status_code i40iw_initialize_ilq(struct i40iw_device *iwdev)
+{
+       struct i40iw_puda_rsrc_info info;
+       enum i40iw_status_code status;
+
+       memset(&info, 0, sizeof(info));
+       info.type = I40IW_PUDA_RSRC_TYPE_ILQ;
+       info.cq_id = 1;
+       info.qp_id = 0;
+       info.count = 1;
+       info.pd_id = 1;
+       info.sq_size = 8192;
+       info.rq_size = 8192;
+       info.buf_size = 1024;
+       info.tx_buf_cnt = 16384;
+       info.mss = iwdev->mss;
+       info.receive = i40iw_receive_ilq;
+       info.xmit_complete = i40iw_free_sqbuf;
+       status = i40iw_puda_create_rsrc(&iwdev->sc_dev, &info);
+       if (status)
+               i40iw_pr_err("ilq create fail\n");
+       return status;
+}
+
+/**
+ * i40iw_initialize_ieq - create iwarp exception queue
+ * @iwdev: iwarp device
+ *
+ * Return 0 if successful, otherwise return error
+ */
+static enum i40iw_status_code i40iw_initialize_ieq(struct i40iw_device *iwdev)
+{
+       struct i40iw_puda_rsrc_info info;
+       enum i40iw_status_code status;
+
+       memset(&info, 0, sizeof(info));
+       info.type = I40IW_PUDA_RSRC_TYPE_IEQ;
+       info.cq_id = 2;
+       info.qp_id = iwdev->sc_dev.exception_lan_queue;
+       info.count = 1;
+       info.pd_id = 2;
+       info.sq_size = 8192;
+       info.rq_size = 8192;
+       info.buf_size = 2048;
+       info.mss = iwdev->mss;
+       info.tx_buf_cnt = 16384;
+       status = i40iw_puda_create_rsrc(&iwdev->sc_dev, &info);
+       if (status)
+               i40iw_pr_err("ieq create fail\n");
+       return status;
+}
+
+/**
+ * i40iw_hmc_setup - create hmc objects for the device
+ * @iwdev: iwarp device
+ *
+ * Set up the device private memory space for the number and size of
+ * the hmc objects and create the objects
+ * Return 0 if successful, otherwise return error
+ */
+static enum i40iw_status_code i40iw_hmc_setup(struct i40iw_device *iwdev)
+{
+       enum i40iw_status_code status;
+
+       iwdev->sd_type = I40IW_SD_TYPE_DIRECT;
+       status = i40iw_config_fpm_values(&iwdev->sc_dev, IW_CFG_FPM_QP_COUNT);
+       if (status)
+               goto exit;
+       status = i40iw_create_hmc_objs(iwdev, true);
+       if (status)
+               goto exit;
+       iwdev->init_state = HMC_OBJS_CREATED;
+exit:
+       return status;
+}
+
+/**
+ * i40iw_del_init_mem - deallocate memory resources
+ * @iwdev: iwarp device
+ */
+static void i40iw_del_init_mem(struct i40iw_device *iwdev)
+{
+       struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+
+       i40iw_free_dma_mem(&iwdev->hw, &iwdev->obj_mem);
+       kfree(dev->hmc_info->sd_table.sd_entry);
+       dev->hmc_info->sd_table.sd_entry = NULL;
+       kfree(iwdev->mem_resources);
+       iwdev->mem_resources = NULL;
+       kfree(iwdev->ceqlist);
+       iwdev->ceqlist = NULL;
+       kfree(iwdev->iw_msixtbl);
+       iwdev->iw_msixtbl = NULL;
+       kfree(iwdev->hmc_info_mem);
+       iwdev->hmc_info_mem = NULL;
+}
+
+/**
+ * i40iw_del_macip_entry - remove a mac ip address entry from the hw table
+ * @iwdev: iwarp device
+ * @idx: the index of the mac ip address to delete
+ */
+static void i40iw_del_macip_entry(struct i40iw_device *iwdev, u8 idx)
+{
+       struct i40iw_cqp *iwcqp = &iwdev->cqp;
+       struct i40iw_cqp_request *cqp_request;
+       struct cqp_commands_info *cqp_info;
+       enum i40iw_status_code status = 0;
+
+       cqp_request = i40iw_get_cqp_request(iwcqp, true);
+       if (!cqp_request) {
+               i40iw_pr_err("cqp_request memory failed\n");
+               return;
+       }
+       cqp_info = &cqp_request->info;
+       cqp_info->cqp_cmd = OP_DELETE_LOCAL_MAC_IPADDR_ENTRY;
+       cqp_info->post_sq = 1;
+       cqp_info->in.u.del_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
+       cqp_info->in.u.del_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
+       cqp_info->in.u.del_local_mac_ipaddr_entry.entry_idx = idx;
+       cqp_info->in.u.del_local_mac_ipaddr_entry.ignore_ref_count = 0;
+       status = i40iw_handle_cqp_op(iwdev, cqp_request);
+       if (status)
+               i40iw_pr_err("CQP-OP Del MAC Ip entry fail");
+}
+
+/**
+ * i40iw_add_mac_ipaddr_entry - add a mac ip address entry to the hw table
+ * @iwdev: iwarp device
+ * @mac_addr: pointer to mac address
+ * @idx: the index of the mac ip address to add
+ */
+static enum i40iw_status_code i40iw_add_mac_ipaddr_entry(struct i40iw_device *iwdev,
+                                                        u8 *mac_addr,
+                                                        u8 idx)
+{
+       struct i40iw_local_mac_ipaddr_entry_info *info;
+       struct i40iw_cqp *iwcqp = &iwdev->cqp;
+       struct i40iw_cqp_request *cqp_request;
+       struct cqp_commands_info *cqp_info;
+       enum i40iw_status_code status = 0;
+
+       cqp_request = i40iw_get_cqp_request(iwcqp, true);
+       if (!cqp_request) {
+               i40iw_pr_err("cqp_request memory failed\n");
+               return I40IW_ERR_NO_MEMORY;
+       }
+
+       cqp_info = &cqp_request->info;
+
+       cqp_info->post_sq = 1;
+       info = &cqp_info->in.u.add_local_mac_ipaddr_entry.info;
+       ether_addr_copy(info->mac_addr, mac_addr);
+       info->entry_idx = idx;
+       cqp_info->cqp_cmd = OP_ADD_LOCAL_MAC_IPADDR_ENTRY;
+       cqp_info->in.u.add_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
+       cqp_info->in.u.add_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
+       status = i40iw_handle_cqp_op(iwdev, cqp_request);
+       if (status)
+               i40iw_pr_err("CQP-OP Add MAC Ip entry fail");
+       return status;
+}
+
+/**
+ * i40iw_alloc_local_mac_ipaddr_entry - allocate a mac ip address entry
+ * @iwdev: iwarp device
+ * @mac_ip_tbl_idx: the index of the new mac ip address
+ *
+ * Allocate a mac ip address entry and update the mac_ip_tbl_idx
+ * to hold the index of the newly created mac ip address
+ * Return 0 if successful, otherwise return error
+ */
+static enum i40iw_status_code i40iw_alloc_local_mac_ipaddr_entry(struct i40iw_device *iwdev,
+                                                                u16 *mac_ip_tbl_idx)
+{
+       struct i40iw_cqp *iwcqp = &iwdev->cqp;
+       struct i40iw_cqp_request *cqp_request;
+       struct cqp_commands_info *cqp_info;
+       enum i40iw_status_code status = 0;
+
+       cqp_request = i40iw_get_cqp_request(iwcqp, true);
+       if (!cqp_request) {
+               i40iw_pr_err("cqp_request memory failed\n");
+               return I40IW_ERR_NO_MEMORY;
+       }
+
+       /* increment refcount, because we need the cqp request ret value */
+       atomic_inc(&cqp_request->refcount);
+
+       cqp_info = &cqp_request->info;
+       cqp_info->cqp_cmd = OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY;
+       cqp_info->post_sq = 1;
+       cqp_info->in.u.alloc_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
+       cqp_info->in.u.alloc_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
+       status = i40iw_handle_cqp_op(iwdev, cqp_request);
+       if (!status)
+               *mac_ip_tbl_idx = cqp_request->compl_info.op_ret_val;
+       else
+               i40iw_pr_err("CQP-OP Alloc MAC Ip entry fail");
+       /* decrement refcount and free the cqp request, if no longer used */
+       i40iw_put_cqp_request(iwcqp, cqp_request);
+       return status;
+}
+
+/**
+ * i40iw_alloc_set_mac_ipaddr - set up a mac ip address table entry
+ * @iwdev: iwarp device
+ * @macaddr: pointer to mac address
+ *
+ * Allocate a mac ip address entry and add it to the hw table
+ * Return 0 if successful, otherwise return error
+ */
+static enum i40iw_status_code i40iw_alloc_set_mac_ipaddr(struct i40iw_device *iwdev,
+                                                        u8 *macaddr)
+{
+       enum i40iw_status_code status;
+
+       status = i40iw_alloc_local_mac_ipaddr_entry(iwdev, &iwdev->mac_ip_table_idx);
+       if (!status) {
+               status = i40iw_add_mac_ipaddr_entry(iwdev, macaddr,
+                                                   (u8)iwdev->mac_ip_table_idx);
+               if (status)
+                       i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
+       }
+       return status;
+}
+
+/**
+ * i40iw_add_ipv6_addr - add ipv6 address to the hw arp table
+ * @iwdev: iwarp device
+ */
+static void i40iw_add_ipv6_addr(struct i40iw_device *iwdev)
+{
+       struct net_device *ip_dev;
+       struct inet6_dev *idev;
+       struct inet6_ifaddr *ifp;
+       __be32 local_ipaddr6[4];
+
+       rcu_read_lock();
+       for_each_netdev_rcu(&init_net, ip_dev) {
+               if ((((rdma_vlan_dev_vlan_id(ip_dev) < 0xFFFF) &&
+                     (rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev)) ||
+                    (ip_dev == iwdev->netdev)) && (ip_dev->flags & IFF_UP)) {
+                       idev = __in6_dev_get(ip_dev);
+                       if (!idev) {
+                               i40iw_pr_err("ipv6 inet device not found\n");
+                               break;
+                       }
+                       list_for_each_entry(ifp, &idev->addr_list, if_list) {
+                               i40iw_pr_info("IP=%pI6, vlan_id=%d, MAC=%pM\n", &ifp->addr,
+                                             rdma_vlan_dev_vlan_id(ip_dev), ip_dev->dev_addr);
+                               i40iw_copy_ip_ntohl(local_ipaddr6,
+                                                   ifp->addr.in6_u.u6_addr32);
+                               i40iw_manage_arp_cache(iwdev,
+                                                      ip_dev->dev_addr,
+                                                      local_ipaddr6,
+                                                      false,
+                                                      I40IW_ARP_ADD);
+                       }
+               }
+       }
+       rcu_read_unlock();
+}
+
+/**
+ * i40iw_add_ipv4_addr - add ipv4 address to the hw arp table
+ * @iwdev: iwarp device
+ */
+static void i40iw_add_ipv4_addr(struct i40iw_device *iwdev)
+{
+       struct net_device *dev;
+       struct in_device *idev;
+       bool got_lock = true;
+       u32 ip_addr;
+
+       if (!rtnl_trylock())
+               got_lock = false;
+
+       for_each_netdev(&init_net, dev) {
+               if ((((rdma_vlan_dev_vlan_id(dev) < 0xFFFF) &&
+                     (rdma_vlan_dev_real_dev(dev) == iwdev->netdev)) ||
+                   (dev == iwdev->netdev)) && (dev->flags & IFF_UP)) {
+                       idev = in_dev_get(dev);
+                       for_ifa(idev) {
+                               i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
+                                           "IP=%pI4, vlan_id=%d, MAC=%pM\n", &ifa->ifa_address,
+                                            rdma_vlan_dev_vlan_id(dev), dev->dev_addr);
+
+                               ip_addr = ntohl(ifa->ifa_address);
+                               i40iw_manage_arp_cache(iwdev,
+                                                      dev->dev_addr,
+                                                      &ip_addr,
+                                                      true,
+                                                      I40IW_ARP_ADD);
+                       }
+                       endfor_ifa(idev);
+                       in_dev_put(idev);
+               }
+       }
+       if (got_lock)
+               rtnl_unlock();
+}
+
+/**
+ * i40iw_add_mac_ip - add mac and ip addresses
+ * @iwdev: iwarp device
+ *
+ * Create and add a mac ip address entry to the hw table and
+ * ipv4/ipv6 addresses to the arp cache
+ * Return 0 if successful, otherwise return error
+ */
+static enum i40iw_status_code i40iw_add_mac_ip(struct i40iw_device *iwdev)
+{
+       struct net_device *netdev = iwdev->netdev;
+       enum i40iw_status_code status;
+
+       status = i40iw_alloc_set_mac_ipaddr(iwdev, (u8 *)netdev->dev_addr);
+       if (status)
+               return status;
+       i40iw_add_ipv4_addr(iwdev);
+       i40iw_add_ipv6_addr(iwdev);
+       return 0;
+}
+
+/**
+ * i40iw_wait_pe_ready - Check if firmware is ready
+ * @hw: provides access to registers
+ */
+static void i40iw_wait_pe_ready(struct i40iw_hw *hw)
+{
+       u32 statusfw;
+       u32 statuscpu0;
+       u32 statuscpu1;
+       u32 statuscpu2;
+       u32 retrycount = 0;
+
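+       /* poll firmware load and PE CPU status registers, up to ~14 seconds */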
+       do {
+               statusfw = i40iw_rd32(hw, I40E_GLPE_FWLDSTATUS);
+               i40iw_pr_info("[%04d] fw load status[x%04X]\n", __LINE__, statusfw);
+               statuscpu0 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS0);
+               i40iw_pr_info("[%04d] CSR_CQP status[x%04X]\n", __LINE__, statuscpu0);
+               statuscpu1 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS1);
+               i40iw_pr_info("[%04d] I40E_GLPE_CPUSTATUS1 status[x%04X]\n",
+                             __LINE__, statuscpu1);
+               statuscpu2 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS2);
+               i40iw_pr_info("[%04d] I40E_GLPE_CPUSTATUS2 status[x%04X]\n",
+                             __LINE__, statuscpu2);
+               if ((statuscpu0 == 0x80) && (statuscpu1 == 0x80) && (statuscpu2 == 0x80))
+                       break;  /* SUCCESS */
+               mdelay(1000);
+               retrycount++;
+       } while (retrycount < 14);
+       i40iw_wr32(hw, 0xb4040, 0x4C104C5);
+}
+
+/**
+ * i40iw_initialize_dev - initialize device
+ * @iwdev: iwarp device
+ * @ldev: lan device information
+ *
+ * Allocate memory for the hmc objects and initialize iwdev
+ * Return 0 if successful, otherwise clean up the resources
+ * and return error
+ */
+static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
+                                                  struct i40e_info *ldev)
+{
+       enum i40iw_status_code status;
+       struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+       struct i40iw_device_init_info info;
+       struct i40iw_dma_mem mem;
+       u32 size;
+
+       memset(&info, 0, sizeof(info));
+       size = sizeof(struct i40iw_hmc_pble_rsrc) + sizeof(struct i40iw_hmc_info) +
+                               (sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX);
+       iwdev->hmc_info_mem = kzalloc(size, GFP_KERNEL);
+       if (!iwdev->hmc_info_mem) {
+               i40iw_pr_err("memory alloc fail\n");
+               return I40IW_ERR_NO_MEMORY;
+       }
+       iwdev->pble_rsrc = (struct i40iw_hmc_pble_rsrc *)iwdev->hmc_info_mem;
+       dev->hmc_info = &iwdev->hw.hmc;
+       dev->hmc_info->hmc_obj = (struct i40iw_hmc_obj_info *)(iwdev->pble_rsrc + 1);
+       status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_QUERY_FPM_BUF_SIZE,
+                                      I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK);
+       if (status)
+               goto exit;
+       info.fpm_query_buf_pa = mem.pa;
+       info.fpm_query_buf = mem.va;
+       status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_COMMIT_FPM_BUF_SIZE,
+                                      I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK);
+       if (status)
+               goto exit;
+       info.fpm_commit_buf_pa = mem.pa;
+       info.fpm_commit_buf = mem.va;
+       info.hmc_fn_id = ldev->fid;
+       info.is_pf = (ldev->ftype) ? false : true;
+       info.bar0 = ldev->hw_addr;
+       info.hw = &iwdev->hw;
+       info.debug_mask = debug;
+       info.qs_handle = ldev->params.qos.prio_qos[0].qs_handle;
+       info.exception_lan_queue = 1;
+       info.vchnl_send = i40iw_virtchnl_send;
+       status = i40iw_device_init(&iwdev->sc_dev, &info);
+exit:
+       if (status) {
+               kfree(iwdev->hmc_info_mem);
+               iwdev->hmc_info_mem = NULL;
+       }
+       return status;
+}
+
+/**
+ * i40iw_register_notifiers - register tcp ip notifiers
+ */
+static void i40iw_register_notifiers(void)
+{
+       if (!i40iw_notifiers_registered) {
+               register_inetaddr_notifier(&i40iw_inetaddr_notifier);
+               register_inet6addr_notifier(&i40iw_inetaddr6_notifier);
+               register_netevent_notifier(&i40iw_net_notifier);
+       }
+       i40iw_notifiers_registered++;
+}
+
+/**
+ * i40iw_save_msix_info - copy msix vector information to iwarp device
+ * @iwdev: iwarp device
+ * @ldev: lan device information
+ *
+ * Allocate iwdev msix table and copy the ldev msix info to the table
+ * Return 0 if successful, otherwise return error
+ */
+static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev,
+                                                  struct i40e_info *ldev)
+{
+       struct i40e_qvlist_info *iw_qvlist;
+       struct i40e_qv_info *iw_qvinfo;
+       u32 ceq_idx;
+       u32 i;
+       u32 size;
+
+       iwdev->msix_count = ldev->msix_count;
+
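+       /* one allocation: the msix vector table followed by the i40e qvlist info */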
+       size = sizeof(struct i40iw_msix_vector) * iwdev->msix_count;
+       size += sizeof(struct i40e_qvlist_info);
+       size +=  sizeof(struct i40e_qv_info) * iwdev->msix_count - 1;
+       iwdev->iw_msixtbl = kzalloc(size, GFP_KERNEL);
+
+       if (!iwdev->iw_msixtbl)
+               return I40IW_ERR_NO_MEMORY;
+       iwdev->iw_qvlist = (struct i40e_qvlist_info *)(&iwdev->iw_msixtbl[iwdev->msix_count]);
+       iw_qvlist = iwdev->iw_qvlist;
+       iw_qvinfo = iw_qvlist->qv_info;
+       iw_qvlist->num_vectors = iwdev->msix_count;
+       if (iwdev->msix_count <= num_online_cpus())
+               iwdev->msix_shared = true;
+       for (i = 0, ceq_idx = 0; i < iwdev->msix_count; i++, iw_qvinfo++) {
+               iwdev->iw_msixtbl[i].idx = ldev->msix_entries[i].entry;
+               iwdev->iw_msixtbl[i].irq = ldev->msix_entries[i].vector;
+               if (i == 0) {
+                       iw_qvinfo->aeq_idx = 0;
+                       if (iwdev->msix_shared)
+                               iw_qvinfo->ceq_idx = ceq_idx++;
+                       else
+                               iw_qvinfo->ceq_idx = I40E_QUEUE_INVALID_IDX;
+               } else {
+                       iw_qvinfo->aeq_idx = I40E_QUEUE_INVALID_IDX;
+                       iw_qvinfo->ceq_idx = ceq_idx++;
+               }
+               iw_qvinfo->itr_idx = 3;
+               iw_qvinfo->v_idx = iwdev->iw_msixtbl[i].idx;
+       }
+       return 0;
+}
+
+/**
+ * i40iw_deinit_device - clean up the device resources
+ * @iwdev: iwarp device
+ * @reset: true if called before reset
+ * @del_hdl: true if delete hdl entry
+ *
+ * Destroy the ib device interface, remove the mac ip entry and ipv4/ipv6 addresses,
+ * destroy the device queues and free the pble and the hmc objects
+ */
+static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset, bool del_hdl)
+{
+       struct i40e_info *ldev = iwdev->ldev;
+
+       struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+
+       i40iw_pr_info("state = %d\n", iwdev->init_state);
+
+       switch (iwdev->init_state) {
+       case RDMA_DEV_REGISTERED:
+               iwdev->iw_status = 0;
+               i40iw_port_ibevent(iwdev);
+               i40iw_destroy_rdma_device(iwdev->iwibdev);
+               /* fallthrough */
+       case IP_ADDR_REGISTERED:
+               if (!reset)
+                       i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
+               /* fallthrough */
+       case INET_NOTIFIER:
+               if (i40iw_notifiers_registered > 0) {
+                       i40iw_notifiers_registered--;
+                       unregister_netevent_notifier(&i40iw_net_notifier);
+                       unregister_inetaddr_notifier(&i40iw_inetaddr_notifier);
+                       unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
+               }
+               /* fallthrough */
+       case CEQ_CREATED:
+               i40iw_dele_ceqs(iwdev, reset);
+               /* fallthrough */
+       case AEQ_CREATED:
+               i40iw_destroy_aeq(iwdev, reset);
+               /* fallthrough */
+       case IEQ_CREATED:
+               i40iw_puda_dele_resources(dev, I40IW_PUDA_RSRC_TYPE_IEQ, reset);
+               /* fallthrough */
+       case ILQ_CREATED:
+               i40iw_puda_dele_resources(dev, I40IW_PUDA_RSRC_TYPE_ILQ, reset);
+               /* fallthrough */
+       case CCQ_CREATED:
+               i40iw_destroy_ccq(iwdev, reset);
+               /* fallthrough */
+       case PBLE_CHUNK_MEM:
+               i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc);
+               /* fallthrough */
+       case HMC_OBJS_CREATED:
+               i40iw_del_hmc_objects(dev, dev->hmc_info, true, reset);
+               /* fallthrough */
+       case CQP_CREATED:
+               i40iw_destroy_cqp(iwdev, !reset);
+               /* fallthrough */
+       case INITIAL_STATE:
+               i40iw_cleanup_cm_core(&iwdev->cm_core);
+               if (dev->is_pf)
+                       i40iw_hw_stats_del_timer(dev);
+
+               i40iw_del_init_mem(iwdev);
+               break;
+       case INVALID_STATE:
+               /* fallthrough */
+       default:
+               i40iw_pr_err("bad init_state = %d\n", iwdev->init_state);
+               break;
+       }
+
+       if (del_hdl)
+               i40iw_del_handler(i40iw_find_i40e_handler(ldev));
+       kfree(iwdev->hdl);
+}
+
+/**
+ * i40iw_setup_init_state - set up the initial device struct
+ * @hdl: handler for iwarp device - one per instance
+ * @ldev: lan device information
+ * @client: iwarp client information, provided during registration
+ *
+ * Initialize the iwarp device and its hdl information
+ * using the ldev and client information
+ * Return 0 if successful, otherwise return error
+ */
+static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
+                                                    struct i40e_info *ldev,
+                                                    struct i40e_client *client)
+{
+       struct i40iw_device *iwdev = &hdl->device;
+       struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+       enum i40iw_status_code status;
+
+       memcpy(&hdl->ldev, ldev, sizeof(*ldev));
+       if (resource_profile == 1)
+               resource_profile = 2;
+
+       iwdev->mpa_version = mpa_version;
+       iwdev->resource_profile = (resource_profile < I40IW_HMC_PROFILE_EQUAL) ?
+           (u8)resource_profile + I40IW_HMC_PROFILE_DEFAULT :
+           I40IW_HMC_PROFILE_DEFAULT;
+       iwdev->max_rdma_vfs =
+               (iwdev->resource_profile != I40IW_HMC_PROFILE_DEFAULT) ?  max_rdma_vfs : 0;
+       iwdev->netdev = ldev->netdev;
+       hdl->client = client;
+       iwdev->mss = (!ldev->params.mtu) ? I40IW_DEFAULT_MSS : ldev->params.mtu - I40IW_MTU_TO_MSS;
+       if (!ldev->ftype)
+               iwdev->db_start = pci_resource_start(ldev->pcidev, 0) + I40IW_DB_ADDR_OFFSET;
+       else
+               iwdev->db_start = pci_resource_start(ldev->pcidev, 0) + I40IW_VF_DB_ADDR_OFFSET;
+
+       status = i40iw_save_msix_info(iwdev, ldev);
+       if (status)
+               goto exit;
+       iwdev->hw.dev_context = (void *)ldev->pcidev;
+       iwdev->hw.hw_addr = ldev->hw_addr;
+       status = i40iw_allocate_dma_mem(&iwdev->hw,
+                                       &iwdev->obj_mem, 8192, 4096);
+       if (status)
+               goto exit;
+       iwdev->obj_next = iwdev->obj_mem;
+       iwdev->push_mode = push_mode;
+       init_waitqueue_head(&iwdev->vchnl_waitq);
+       status = i40iw_initialize_dev(iwdev, ldev);
+exit:
+       if (status) {
+               kfree(iwdev->iw_msixtbl);
+               i40iw_free_dma_mem(dev->hw, &iwdev->obj_mem);
+               iwdev->iw_msixtbl = NULL;
+       }
+       return status;
+}
+
+/**
+ * i40iw_open - client interface operation open for iwarp/uda device
+ * @ldev: lan device information
+ * @client: iwarp client information, provided during registration
+ *
+ * Called by the lan driver during the processing of client register
+ * Create device resources, set up queues, pble and hmc objects and
+ * register the device with the ib verbs interface
+ * Return 0 if successful, otherwise return error
+ */
+static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
+{
+       struct i40iw_device *iwdev;
+       struct i40iw_sc_dev *dev;
+       enum i40iw_status_code status;
+       struct i40iw_handler *hdl;
+
+       hdl = kzalloc(sizeof(*hdl), GFP_KERNEL);
+       if (!hdl)
+               return -ENOMEM;
+       iwdev = &hdl->device;
+       iwdev->hdl = hdl;
+       dev = &iwdev->sc_dev;
+       i40iw_setup_cm_core(iwdev);
+
+       dev->back_dev = (void *)iwdev;
+       iwdev->ldev = &hdl->ldev;
+       iwdev->client = client;
+       mutex_init(&iwdev->pbl_mutex);
+       i40iw_add_handler(hdl);
+
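+       /* do/while(0): any setup failure breaks out to the common error path below */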
+       do {
+               status = i40iw_setup_init_state(hdl, ldev, client);
+               if (status)
+                       break;
+               iwdev->init_state = INITIAL_STATE;
+               if (dev->is_pf)
+                       i40iw_wait_pe_ready(dev->hw);
+               status = i40iw_create_cqp(iwdev);
+               if (status)
+                       break;
+               iwdev->init_state = CQP_CREATED;
+               status = i40iw_hmc_setup(iwdev);
+               if (status)
+                       break;
+               status = i40iw_create_ccq(iwdev);
+               if (status)
+                       break;
+               iwdev->init_state = CCQ_CREATED;
+               status = i40iw_initialize_ilq(iwdev);
+               if (status)
+                       break;
+               iwdev->init_state = ILQ_CREATED;
+               status = i40iw_initialize_ieq(iwdev);
+               if (status)
+                       break;
+               iwdev->init_state = IEQ_CREATED;
+               status = i40iw_setup_aeq(iwdev);
+               if (status)
+                       break;
+               iwdev->init_state = AEQ_CREATED;
+               status = i40iw_setup_ceqs(iwdev, ldev);
+               if (status)
+                       break;
+               iwdev->init_state = CEQ_CREATED;
+               status = i40iw_initialize_hw_resources(iwdev);
+               if (status)
+                       break;
+               dev->ccq_ops->ccq_arm(dev->ccq);
+               status = i40iw_hmc_init_pble(&iwdev->sc_dev, iwdev->pble_rsrc);
+               if (status)
+                       break;
+               iwdev->virtchnl_wq = create_singlethread_workqueue("iwvch");
+               i40iw_register_notifiers();
+               iwdev->init_state = INET_NOTIFIER;
+               status = i40iw_add_mac_ip(iwdev);
+               if (status)
+                       break;
+               iwdev->init_state = IP_ADDR_REGISTERED;
+               if (i40iw_register_rdma_device(iwdev)) {
+                       i40iw_pr_err("register rdma device fail\n");
+                       break;
+               }
+
+               iwdev->init_state = RDMA_DEV_REGISTERED;
+               iwdev->iw_status = 1;
+               i40iw_port_ibevent(iwdev);
+               i40iw_pr_info("i40iw_open completed\n");
+               return 0;
+       } while (0);
+
+       i40iw_pr_err("status = %d last completion = %d\n", status, iwdev->init_state);
+       i40iw_deinit_device(iwdev, false, false);
+       return -ERESTART;
+}
+
+/**
+ * i40iw_l2param_change - handle qs handles for qos and mss change
+ * @ldev: lan device information
+ * @client: client for parameter change
+ * @params: new parameters from L2
+ */
+static void i40iw_l2param_change(struct i40e_info *ldev,
+                                struct i40e_client *client,
+                                struct i40e_params *params)
+{
+       struct i40iw_handler *hdl;
+       struct i40iw_device *iwdev;
+
+       hdl = i40iw_find_i40e_handler(ldev);
+       if (!hdl)
+               return;
+
+       iwdev = &hdl->device;
+       if (params->mtu)
+               iwdev->mss = params->mtu - I40IW_MTU_TO_MSS;
+}
+
+/**
+ * i40iw_close - client interface operation close for iwarp/uda device
+ * @ldev: lan device information
+ * @client: client to close
+ * @reset: true if called before reset
+ *
+ * Called by the lan driver during the processing of client unregister
+ * Destroy and clean up the driver resources
+ */
+static void i40iw_close(struct i40e_info *ldev, struct i40e_client *client, bool reset)
+{
+       struct i40iw_device *iwdev;
+       struct i40iw_handler *hdl;
+
+       hdl = i40iw_find_i40e_handler(ldev);
+       if (!hdl)
+               return;
+
+       iwdev = &hdl->device;
+       destroy_workqueue(iwdev->virtchnl_wq);
+       i40iw_deinit_device(iwdev, reset, true);
+}
+
+/**
+ * i40iw_vf_reset - process VF reset
+ * @ldev: lan device information
+ * @client: client interface instance
+ * @vf_id: virtual function id
+ *
+ * Called when a VF is reset by the PF
+ * Destroy and clean up the VF resources
+ */
+static void i40iw_vf_reset(struct i40e_info *ldev, struct i40e_client *client, u32 vf_id)
+{
+       struct i40iw_handler *hdl;
+       struct i40iw_sc_dev *dev;
+       struct i40iw_hmc_fcn_info hmc_fcn_info;
+       struct i40iw_virt_mem vf_dev_mem;
+       struct i40iw_vfdev *tmp_vfdev;
+       unsigned int i;
+       unsigned long flags;
+
+       hdl = i40iw_find_i40e_handler(ldev);
+       if (!hdl)
+               return;
+
+       dev = &hdl->device.sc_dev;
+
+       for (i = 0; i < I40IW_MAX_PE_ENABLED_VF_COUNT; i++) {
+               if (!dev->vf_dev[i] || (dev->vf_dev[i]->vf_id != vf_id))
+                       continue;
+
+               /* free all resources allocated on behalf of vf */
+               tmp_vfdev = dev->vf_dev[i];
+               spin_lock_irqsave(&dev->dev_pestat.stats_lock, flags);
+               dev->vf_dev[i] = NULL;
+               spin_unlock_irqrestore(&dev->dev_pestat.stats_lock, flags);
+               i40iw_del_hmc_objects(dev, &tmp_vfdev->hmc_info, false, false);
+               /* remove vf hmc function */
+               memset(&hmc_fcn_info, 0, sizeof(hmc_fcn_info));
+               hmc_fcn_info.vf_id = vf_id;
+               hmc_fcn_info.iw_vf_idx = tmp_vfdev->iw_vf_idx;
+               hmc_fcn_info.free_fcn = true;
+               i40iw_cqp_manage_hmc_fcn_cmd(dev, &hmc_fcn_info);
+               /* free vf_dev */
+               vf_dev_mem.va = tmp_vfdev;
+               vf_dev_mem.size = sizeof(struct i40iw_vfdev) +
+                                       sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX;
+               i40iw_free_virt_mem(dev->hw, &vf_dev_mem);
+               break;
+       }
+}
+
+/**
+ * i40iw_vf_enable - enable a number of VFs
+ * @ldev: lan device information
+ * @client: client interface instance
+ * @num_vfs: number of VFs for the PF
+ *
+ * Called when the number of VFs changes
+ */
+static void i40iw_vf_enable(struct i40e_info *ldev,
+                           struct i40e_client *client,
+                           u32 num_vfs)
+{
+       struct i40iw_handler *hdl;
+
+       hdl = i40iw_find_i40e_handler(ldev);
+       if (!hdl)
+               return;
+
+       if (num_vfs > I40IW_MAX_PE_ENABLED_VF_COUNT)
+               hdl->device.max_enabled_vfs = I40IW_MAX_PE_ENABLED_VF_COUNT;
+       else
+               hdl->device.max_enabled_vfs = num_vfs;
+}
+
+/**
+ * i40iw_vf_capable - check if VF capable
+ * @ldev: lan device information
+ * @client: client interface instance
+ * @vf_id: virtual function id
+ *
+ * Return 1 if a VF slot is available or if VF is already RDMA enabled
+ * Return 0 otherwise
+ */
+static int i40iw_vf_capable(struct i40e_info *ldev,
+                           struct i40e_client *client,
+                           u32 vf_id)
+{
+       struct i40iw_handler *hdl;
+       struct i40iw_sc_dev *dev;
+       unsigned int i;
+
+       hdl = i40iw_find_i40e_handler(ldev);
+       if (!hdl)
+               return 0;
+
+       dev = &hdl->device.sc_dev;
+
+       for (i = 0; i < hdl->device.max_enabled_vfs; i++) {
+               if (!dev->vf_dev[i] || (dev->vf_dev[i]->vf_id == vf_id))
+                       return 1;
+       }
+
+       return 0;
+}
+
+/**
+ * i40iw_virtchnl_receive - receive a message through the virtual channel
+ * @ldev: lan device information
+ * @client: client interface instance
+ * @vf_id: virtual function id associated with the message
+ * @msg: message buffer pointer
+ * @len: length of the message
+ *
+ * Invoke virtual channel receive operation for the given msg
+ * Return 0 if successful, otherwise return error
+ */
+static int i40iw_virtchnl_receive(struct i40e_info *ldev,
+                                 struct i40e_client *client,
+                                 u32 vf_id,
+                                 u8 *msg,
+                                 u16 len)
+{
+       struct i40iw_handler *hdl;
+       struct i40iw_sc_dev *dev;
+       struct i40iw_device *iwdev;
+       int ret_code = I40IW_NOT_SUPPORTED;
+
+       if (!len || !msg)
+               return I40IW_ERR_PARAM;
+
+       hdl = i40iw_find_i40e_handler(ldev);
+       if (!hdl)
+               return I40IW_ERR_PARAM;
+
+       dev = &hdl->device.sc_dev;
+       iwdev = dev->back_dev;
+
+       i40iw_debug(dev, I40IW_DEBUG_VIRT, "msg %p, message length %u\n", msg, len);
+
+       if (dev->vchnl_if.vchnl_recv) {
+               ret_code = dev->vchnl_if.vchnl_recv(dev, vf_id, msg, len);
+               if (!dev->is_pf) {
+                       atomic_dec(&iwdev->vchnl_msgs);
+                       wake_up(&iwdev->vchnl_waitq);
+               }
+       }
+       return ret_code;
+}
+
+/**
+ * i40iw_virtchnl_send - send a message through the virtual channel
+ * @dev: iwarp device
+ * @vf_id: virtual function id associated with the message
+ * @msg: virtual channel message buffer pointer
+ * @len: length of the message
+ *
+ * Invoke virtual channel send operation for the given msg
+ * Return 0 if successful, otherwise return error
+ */
+static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev,
+                                                 u32 vf_id,
+                                                 u8 *msg,
+                                                 u16 len)
+{
+       struct i40iw_device *iwdev;
+       struct i40e_info *ldev;
+       enum i40iw_status_code ret_code = I40IW_ERR_BAD_PTR;
+
+       if (!dev || !dev->back_dev)
+               return ret_code;
+
+       iwdev = dev->back_dev;
+       ldev = iwdev->ldev;
+
+       if (ldev && ldev->ops && ldev->ops->virtchnl_send)
+               ret_code = ldev->ops->virtchnl_send(ldev, &i40iw_client, vf_id, msg, len);
+
+       return ret_code;
+}
+
+/* client interface functions */
+static struct i40e_client_ops i40e_ops = {
+       .open = i40iw_open,
+       .close = i40iw_close,
+       .l2_param_change = i40iw_l2param_change,
+       .virtchnl_receive = i40iw_virtchnl_receive,
+       .vf_reset = i40iw_vf_reset,
+       .vf_enable = i40iw_vf_enable,
+       .vf_capable = i40iw_vf_capable
+};
+
+/**
+ * i40iw_init_module - driver initialization function
+ *
+ * First function to call when the driver is loaded
+ * Register the driver as i40e client and port mapper client
+ */
+static int __init i40iw_init_module(void)
+{
+       int ret;
+
+       memset(&i40iw_client, 0, sizeof(i40iw_client));
+       i40iw_client.version.major = CLIENT_IW_INTERFACE_VERSION_MAJOR;
+       i40iw_client.version.minor = CLIENT_IW_INTERFACE_VERSION_MINOR;
+       i40iw_client.version.build = CLIENT_IW_INTERFACE_VERSION_BUILD;
+       i40iw_client.ops = &i40e_ops;
+       memcpy(i40iw_client.name, i40iw_client_name, I40E_CLIENT_STR_LENGTH);
+       i40iw_client.type = I40E_CLIENT_IWARP;
+       spin_lock_init(&i40iw_handler_lock);
+       ret = i40e_register_client(&i40iw_client);
+       return ret;
+}
+
+/**
+ * i40iw_exit_module - driver exit clean up function
+ *
+ * The function is called just before the driver is unloaded
+ * Unregister the driver as i40e client and port mapper client
+ */
+static void __exit i40iw_exit_module(void)
+{
+       i40e_unregister_client(&i40iw_client);
+}
+
+module_init(i40iw_init_module);
+module_exit(i40iw_exit_module);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_osdep.h b/drivers/infiniband/hw/i40iw/i40iw_osdep.h
new file mode 100644 (file)
index 0000000..7e20493
--- /dev/null
@@ -0,0 +1,215 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses.  You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+*   Redistribution and use in source and binary forms, with or
+*   without modification, are permitted provided that the following
+*   conditions are met:
+*
+*    - Redistributions of source code must retain the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer.
+*
+*    - Redistributions in binary form must reproduce the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer in the documentation and/or other materials
+*      provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_OSDEP_H
+#define I40IW_OSDEP_H
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/bitops.h>
+#include <net/tcp.h>
+#include <crypto/hash.h>
+/* get readq/writeq support for 32 bit kernels, use the low-first version */
+#include <linux/io-64-nonatomic-lo-hi.h>
+
+#define STATS_TIMER_DELAY 1000
+
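+/**
+ * set_64bit_val - set 64 bit value to hw wqe
+ * @wqe_words: wqe addr to write
+ * @byte_index: index in wqe
+ * @value: value to write
+ **/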
+static inline void set_64bit_val(u64 *wqe_words, u32 byte_index, u64 value)
+{
+       wqe_words[byte_index >> 3] = value;
+}
+
+/**
+ * set_32bit_val - set 32 value to hw wqe
+ * @wqe_words: wqe addr to write
+ * @byte_index: index in wqe
+ * @value: value to write
+ **/
+static inline void set_32bit_val(u32 *wqe_words, u32 byte_index, u32 value)
+{
+       wqe_words[byte_index >> 2] = value;
+}
+
+/**
+ * get_64bit_val - read 64 bit value from wqe
+ * @wqe_words: wqe addr
+ * @byte_index: index to read from
+ * @value: read value
+ **/
+static inline void get_64bit_val(u64 *wqe_words, u32 byte_index, u64 *value)
+{
+       *value = wqe_words[byte_index >> 3];
+}
+
+/**
+ * get_32bit_val - read 32 bit value from wqe
+ * @wqe_words: wqe addr
+ * @byte_index: index to read from
+ * @value: return 32 bit value
+ **/
+static inline void get_32bit_val(u32 *wqe_words, u32 byte_index, u32 *value)
+{
+       *value = wqe_words[byte_index >> 2];
+}
+
+struct i40iw_dma_mem {
+       void *va;
+       dma_addr_t pa;
+       u32 size;
+} __packed;
+
+struct i40iw_virt_mem {
+       void *va;
+       u32 size;
+} __packed;
+
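+/*
+ * conditional debug print: the message is emitted only when the mask bit
+ * is set in the device debug_mask,
+ * e.g. i40iw_debug(dev, I40IW_DEBUG_PBLE, "pages = %d\n", pages);
+ */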
+#define i40iw_debug(h, m, s, ...)                               \
+do {                                                            \
+       if (((m) & (h)->debug_mask))                            \
+               pr_info("i40iw " s, ##__VA_ARGS__);             \
+} while (0)
+
+#define i40iw_flush(a)          readl((a)->hw_addr + I40E_GLGEN_STAT)
+
+#define I40E_GLHMC_VFSDCMD(_i)  (0x000C8000 + ((_i) * 4)) \
+                               /* _i=0...31 */
+#define I40E_GLHMC_VFSDCMD_MAX_INDEX    31
+#define I40E_GLHMC_VFSDCMD_PMSDIDX_SHIFT  0
+#define I40E_GLHMC_VFSDCMD_PMSDIDX_MASK  (0xFFF \
+                                         << I40E_GLHMC_VFSDCMD_PMSDIDX_SHIFT)
+#define I40E_GLHMC_VFSDCMD_PF_SHIFT       16
+#define I40E_GLHMC_VFSDCMD_PF_MASK        (0xF << I40E_GLHMC_VFSDCMD_PF_SHIFT)
+#define I40E_GLHMC_VFSDCMD_VF_SHIFT       20
+#define I40E_GLHMC_VFSDCMD_VF_MASK        (0x1FF << I40E_GLHMC_VFSDCMD_VF_SHIFT)
+#define I40E_GLHMC_VFSDCMD_PMF_TYPE_SHIFT 29
+#define I40E_GLHMC_VFSDCMD_PMF_TYPE_MASK  (0x3 \
+                                          << I40E_GLHMC_VFSDCMD_PMF_TYPE_SHIFT)
+#define I40E_GLHMC_VFSDCMD_PMSDWR_SHIFT   31
+#define I40E_GLHMC_VFSDCMD_PMSDWR_MASK  (0x1 << I40E_GLHMC_VFSDCMD_PMSDWR_SHIFT)
+
+#define I40E_GLHMC_VFSDDATAHIGH(_i)     (0x000C8200 + ((_i) * 4)) \
+                               /* _i=0...31 */
+#define I40E_GLHMC_VFSDDATAHIGH_MAX_INDEX       31
+#define I40E_GLHMC_VFSDDATAHIGH_PMSDDATAHIGH_SHIFT 0
+#define I40E_GLHMC_VFSDDATAHIGH_PMSDDATAHIGH_MASK  (0xFFFFFFFF \
+                       << I40E_GLHMC_VFSDDATAHIGH_PMSDDATAHIGH_SHIFT)
+
+#define I40E_GLHMC_VFSDDATALOW(_i)      (0x000C8100 + ((_i) * 4)) \
+                               /* _i=0...31 */
+#define I40E_GLHMC_VFSDDATALOW_MAX_INDEX        31
+#define I40E_GLHMC_VFSDDATALOW_PMSDVALID_SHIFT   0
+#define I40E_GLHMC_VFSDDATALOW_PMSDVALID_MASK  (0x1 \
+                       << I40E_GLHMC_VFSDDATALOW_PMSDVALID_SHIFT)
+#define I40E_GLHMC_VFSDDATALOW_PMSDTYPE_SHIFT    1
+#define I40E_GLHMC_VFSDDATALOW_PMSDTYPE_MASK  (0x1 \
+                       << I40E_GLHMC_VFSDDATALOW_PMSDTYPE_SHIFT)
+#define I40E_GLHMC_VFSDDATALOW_PMSDBPCOUNT_SHIFT 2
+#define I40E_GLHMC_VFSDDATALOW_PMSDBPCOUNT_MASK  (0x3FF \
+                       << I40E_GLHMC_VFSDDATALOW_PMSDBPCOUNT_SHIFT)
+#define I40E_GLHMC_VFSDDATALOW_PMSDDATALOW_SHIFT 12
+#define I40E_GLHMC_VFSDDATALOW_PMSDDATALOW_MASK  (0xFFFFF \
+                       << I40E_GLHMC_VFSDDATALOW_PMSDDATALOW_SHIFT)
+
+#define I40E_GLPE_FWLDSTATUS                     0x0000D200
+#define I40E_GLPE_FWLDSTATUS_LOAD_REQUESTED_SHIFT 0
+#define I40E_GLPE_FWLDSTATUS_LOAD_REQUESTED_MASK  (0x1 \
+                       << I40E_GLPE_FWLDSTATUS_LOAD_REQUESTED_SHIFT)
+#define I40E_GLPE_FWLDSTATUS_DONE_SHIFT           1
+#define I40E_GLPE_FWLDSTATUS_DONE_MASK  (0x1 << I40E_GLPE_FWLDSTATUS_DONE_SHIFT)
+#define I40E_GLPE_FWLDSTATUS_CQP_FAIL_SHIFT       2
+#define I40E_GLPE_FWLDSTATUS_CQP_FAIL_MASK  (0x1 \
+                        << I40E_GLPE_FWLDSTATUS_CQP_FAIL_SHIFT)
+#define I40E_GLPE_FWLDSTATUS_TEP_FAIL_SHIFT       3
+#define I40E_GLPE_FWLDSTATUS_TEP_FAIL_MASK  (0x1 \
+                        << I40E_GLPE_FWLDSTATUS_TEP_FAIL_SHIFT)
+#define I40E_GLPE_FWLDSTATUS_OOP_FAIL_SHIFT       4
+#define I40E_GLPE_FWLDSTATUS_OOP_FAIL_MASK  (0x1 \
+                        << I40E_GLPE_FWLDSTATUS_OOP_FAIL_SHIFT)
+
+struct i40iw_sc_dev;
+struct i40iw_sc_qp;
+struct i40iw_puda_buf;
+struct i40iw_puda_completion_info;
+struct i40iw_update_sds_info;
+struct i40iw_hmc_fcn_info;
+struct i40iw_virtchnl_work_info;
+struct i40iw_manage_vf_pble_info;
+struct i40iw_device;
+struct i40iw_hmc_info;
+struct i40iw_hw;
+
+u8 __iomem *i40iw_get_hw_addr(void *dev);
+void i40iw_ieq_mpa_crc_ae(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
+enum i40iw_status_code i40iw_vf_wait_vchnl_resp(struct i40iw_sc_dev *dev);
+enum i40iw_status_code i40iw_ieq_check_mpacrc(struct shash_desc *desc, void *addr,
+                                             u32 length, u32 value);
+struct i40iw_sc_qp *i40iw_ieq_get_qp(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *buf);
+void i40iw_ieq_update_tcpip_info(struct i40iw_puda_buf *buf, u16 length, u32 seqnum);
+void i40iw_free_hash_desc(struct shash_desc *);
+enum i40iw_status_code i40iw_init_hash_desc(struct shash_desc **);
+enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_info *info,
+                                                struct i40iw_puda_buf *buf);
+enum i40iw_status_code i40iw_cqp_sds_cmd(struct i40iw_sc_dev *dev,
+                                        struct i40iw_update_sds_info *info);
+enum i40iw_status_code i40iw_cqp_manage_hmc_fcn_cmd(struct i40iw_sc_dev *dev,
+                                                   struct i40iw_hmc_fcn_info *hmcfcninfo);
+enum i40iw_status_code i40iw_cqp_query_fpm_values_cmd(struct i40iw_sc_dev *dev,
+                                                     struct i40iw_dma_mem *values_mem,
+                                                     u8 hmc_fn_id);
+enum i40iw_status_code i40iw_cqp_commit_fpm_values_cmd(struct i40iw_sc_dev *dev,
+                                                      struct i40iw_dma_mem *values_mem,
+                                                      u8 hmc_fn_id);
+enum i40iw_status_code i40iw_alloc_query_fpm_buf(struct i40iw_sc_dev *dev,
+                                                struct i40iw_dma_mem *mem);
+enum i40iw_status_code i40iw_cqp_manage_vf_pble_bp(struct i40iw_sc_dev *dev,
+                                                  struct i40iw_manage_vf_pble_info *info);
+void i40iw_cqp_spawn_worker(struct i40iw_sc_dev *dev,
+                           struct i40iw_virtchnl_work_info *work_info, u32 iw_vf_idx);
+void *i40iw_remove_head(struct list_head *list);
+
+void i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len);
+void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred);
+void i40iw_terminate_start_timer(struct i40iw_sc_qp *qp);
+void i40iw_terminate_del_timer(struct i40iw_sc_qp *qp);
+
+enum i40iw_status_code i40iw_hw_manage_vf_pble_bp(struct i40iw_device *iwdev,
+                                                 struct i40iw_manage_vf_pble_info *info,
+                                                 bool wait);
+struct i40iw_dev_pestat;
+void i40iw_hw_stats_start_timer(struct i40iw_sc_dev *);
+void i40iw_hw_stats_del_timer(struct i40iw_sc_dev *);
+#define i40iw_mmiowb() mmiowb()
+void i40iw_wr32(struct i40iw_hw *hw, u32 reg, u32 value);
+u32  i40iw_rd32(struct i40iw_hw *hw, u32 reg);
+#endif                         /* I40IW_OSDEP_H */
diff --git a/drivers/infiniband/hw/i40iw/i40iw_p.h b/drivers/infiniband/hw/i40iw/i40iw_p.h
new file mode 100644 (file)
index 0000000..a0b8ca1
--- /dev/null
@@ -0,0 +1,106 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses.  You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+*   Redistribution and use in source and binary forms, with or
+*   without modification, are permitted provided that the following
+*   conditions are met:
+*
+*    - Redistributions of source code must retain the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer.
+*
+*    - Redistributions in binary form must reproduce the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer in the documentation and/or other materials
+*      provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_P_H
+#define I40IW_P_H
+
+#define PAUSE_TIMER_VALUE  0xFFFF
+#define REFRESH_THRESHOLD  0x7FFF
+#define HIGH_THRESHOLD     0x800
+#define LOW_THRESHOLD      0x200
+#define ALL_TC2PFC         0xFF
+
+void i40iw_debug_buf(struct i40iw_sc_dev *dev, enum i40iw_debug_flag mask,
+                    char *desc, u64 *buf, u32 size);
+/* init operations */
+enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
+                                        struct i40iw_device_init_info *info);
+
+enum i40iw_status_code i40iw_device_init_pestat(struct i40iw_dev_pestat *);
+
+void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp);
+
+u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch);
+
+enum i40iw_status_code i40iw_sc_mr_fast_register(struct i40iw_sc_qp *qp,
+                                                struct i40iw_fast_reg_stag_info *info,
+                                                bool post_sq);
+
+/* HMC/FPM functions */
+enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev,
+                                           u8 hmc_fn_id);
+
+enum i40iw_status_code i40iw_pf_init_vfhmc(struct i40iw_sc_dev *dev, u8 vf_hmc_fn_id,
+                                          u32 *vf_cnt_array);
+
+/* cqp misc functions */
+
+void i40iw_terminate_send_fin(struct i40iw_sc_qp *qp);
+
+void i40iw_terminate_connection(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info);
+
+void i40iw_terminate_received(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info);
+
+enum i40iw_status_code i40iw_sc_suspend_qp(struct i40iw_sc_cqp *cqp,
+                                          struct i40iw_sc_qp *qp, u64 scratch);
+
+enum i40iw_status_code i40iw_sc_resume_qp(struct i40iw_sc_cqp *cqp,
+                                         struct i40iw_sc_qp *qp, u64 scratch);
+
+enum i40iw_status_code i40iw_sc_static_hmc_pages_allocated(struct i40iw_sc_cqp *cqp,
+                                                          u64 scratch, u8 hmc_fn_id,
+                                                          bool post_sq,
+                                                          bool poll_registers);
+
+enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_count);
+
+void free_sd_mem(struct i40iw_sc_dev *dev);
+
+enum i40iw_status_code i40iw_process_cqp_cmd(struct i40iw_sc_dev *dev,
+                                            struct cqp_commands_info *pcmdinfo);
+
+enum i40iw_status_code i40iw_process_bh(struct i40iw_sc_dev *dev);
+
+/* prototype for functions used for dynamic memory allocation */
+enum i40iw_status_code i40iw_allocate_dma_mem(struct i40iw_hw *hw,
+                                             struct i40iw_dma_mem *mem, u64 size,
+                                             u32 alignment);
+void i40iw_free_dma_mem(struct i40iw_hw *hw, struct i40iw_dma_mem *mem);
+enum i40iw_status_code i40iw_allocate_virt_mem(struct i40iw_hw *hw,
+                                              struct i40iw_virt_mem *mem, u32 size);
+enum i40iw_status_code i40iw_free_virt_mem(struct i40iw_hw *hw,
+                                          struct i40iw_virt_mem *mem);
+u8 i40iw_get_encoded_wqe_size(u32 wqsize, bool cqpsq);
+
+#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_pble.c b/drivers/infiniband/hw/i40iw/i40iw_pble.c
new file mode 100644 (file)
index 0000000..ded853d
--- /dev/null
@@ -0,0 +1,618 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses.  You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+*   Redistribution and use in source and binary forms, with or
+*   without modification, are permitted provided that the following
+*   conditions are met:
+*
+*    - Redistributions of source code must retain the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer.
+*
+*    - Redistributions in binary form must reproduce the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer in the documentation and/or other materials
+*      provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#include "i40iw_status.h"
+#include "i40iw_osdep.h"
+#include "i40iw_register.h"
+#include "i40iw_hmc.h"
+
+#include "i40iw_d.h"
+#include "i40iw_type.h"
+#include "i40iw_p.h"
+
+#include <linux/pci.h>
+#include <linux/genalloc.h>
+#include <linux/vmalloc.h>
+#include "i40iw_pble.h"
+#include "i40iw.h"
+
+struct i40iw_device;
+static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
+                                           struct i40iw_hmc_pble_rsrc *pble_rsrc);
+static void i40iw_free_vmalloc_mem(struct i40iw_hw *hw, struct i40iw_chunk *chunk);
+
+/**
+ * i40iw_destroy_pble_pool - destroy pool during module unload
+ * @dev: i40iw_sc_dev struct
+ * @pble_rsrc: pble resources
+ */
+void i40iw_destroy_pble_pool(struct i40iw_sc_dev *dev, struct i40iw_hmc_pble_rsrc *pble_rsrc)
+{
+       struct list_head *clist;
+       struct list_head *tlist;
+       struct i40iw_chunk *chunk;
+       struct i40iw_pble_pool *pinfo = &pble_rsrc->pinfo;
+
+       if (pinfo->pool) {
+               list_for_each_safe(clist, tlist, &pinfo->clist) {
+                       chunk = list_entry(clist, struct i40iw_chunk, list);
+                       if (chunk->type == I40IW_VMALLOC)
+                               i40iw_free_vmalloc_mem(dev->hw, chunk);
+                       kfree(chunk);
+               }
+               gen_pool_destroy(pinfo->pool);
+       }
+}
+
+/**
+ * i40iw_hmc_init_pble - Initialize pble resources during module load
+ * @dev: i40iw_sc_dev struct
+ * @pble_rsrc: pble resources
+ */
+enum i40iw_status_code i40iw_hmc_init_pble(struct i40iw_sc_dev *dev,
+                                          struct i40iw_hmc_pble_rsrc *pble_rsrc)
+{
+       struct i40iw_hmc_info *hmc_info;
+       u32 fpm_idx = 0;
+
+       hmc_info = dev->hmc_info;
+       pble_rsrc->fpm_base_addr = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].base;
+       /* Now start the pbles on a 4k boundary (fpm_idx is in 8-byte pble units) */
+       if (pble_rsrc->fpm_base_addr & 0xfff)
+               fpm_idx = (PAGE_SIZE - (pble_rsrc->fpm_base_addr & 0xfff)) >> 3;
+
+       pble_rsrc->unallocated_pble =
+           hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt - fpm_idx;
+       pble_rsrc->next_fpm_addr = pble_rsrc->fpm_base_addr + (fpm_idx << 3);
+
+       pble_rsrc->pinfo.pool_shift = POOL_SHIFT;
+       pble_rsrc->pinfo.pool = gen_pool_create(pble_rsrc->pinfo.pool_shift, -1);
+       INIT_LIST_HEAD(&pble_rsrc->pinfo.clist);
+       if (!pble_rsrc->pinfo.pool)
+               goto error;
+
+       if (add_pble_pool(dev, pble_rsrc))
+               goto error;
+
+       return 0;
+
+ error:
+       i40iw_destroy_pble_pool(dev, pble_rsrc);
+       return I40IW_ERR_NO_MEMORY;
+}
+
+/**
+ * get_sd_pd_idx - Returns sd index, pd index and rel_pd_idx from fpm address
+ * @pble_rsrc: structure containing fpm address
+ * @idx: where to return indexes
+ */
+static inline void get_sd_pd_idx(struct i40iw_hmc_pble_rsrc *pble_rsrc,
+                                struct sd_pd_idx *idx)
+{
+       idx->sd_idx = (u32)(pble_rsrc->next_fpm_addr) / I40IW_HMC_DIRECT_BP_SIZE;
+       idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr) / I40IW_HMC_PAGED_BP_SIZE;
+       idx->rel_pd_idx = (idx->pd_idx % I40IW_HMC_PD_CNT_IN_SD);
+}
+
+/**
+ * add_sd_direct - add sd direct for pble
+ * @dev: hardware control device structure
+ * @pble_rsrc: pble resource ptr
+ * @info: page info for sd
+ */
+static enum i40iw_status_code add_sd_direct(struct i40iw_sc_dev *dev,
+                                           struct i40iw_hmc_pble_rsrc *pble_rsrc,
+                                           struct i40iw_add_page_info *info)
+{
+       enum i40iw_status_code ret_code = 0;
+       struct sd_pd_idx *idx = &info->idx;
+       struct i40iw_chunk *chunk = info->chunk;
+       struct i40iw_hmc_info *hmc_info = info->hmc_info;
+       struct i40iw_hmc_sd_entry *sd_entry = info->sd_entry;
+       u32 offset = 0;
+
+       if (!sd_entry->valid) {
+               if (dev->is_pf) {
+                       ret_code = i40iw_add_sd_table_entry(dev->hw, hmc_info,
+                                                           info->idx.sd_idx,
+                                                           I40IW_SD_TYPE_DIRECT,
+                                                           I40IW_HMC_DIRECT_BP_SIZE);
+                       if (ret_code)
+                               return ret_code;
+                       chunk->type = I40IW_DMA_COHERENT;
+               }
+       }
+       offset = idx->rel_pd_idx << I40IW_HMC_PAGED_BP_SHIFT;
+       chunk->size = info->pages << I40IW_HMC_PAGED_BP_SHIFT;
+       chunk->vaddr = ((u8 *)sd_entry->u.bp.addr.va + offset);
+       chunk->fpm_addr = pble_rsrc->next_fpm_addr;
+       i40iw_debug(dev, I40IW_DEBUG_PBLE, "chunk_size[%d] = 0x%x vaddr=%p fpm_addr = %llx\n",
+                   chunk->size, chunk->size, chunk->vaddr, chunk->fpm_addr);
+       return 0;
+}
+
+/**
+ * i40iw_free_vmalloc_mem - free vmalloc during close
+ * @hw: hw struct
+ * @chunk: chunk information for vmalloc
+ */
+static void i40iw_free_vmalloc_mem(struct i40iw_hw *hw, struct i40iw_chunk *chunk)
+{
+       struct pci_dev *pcidev = (struct pci_dev *)hw->dev_context;
+       int i;
+
+       if (!chunk->pg_cnt)
+               goto done;
+       for (i = 0; i < chunk->pg_cnt; i++)
+               dma_unmap_page(&pcidev->dev, chunk->dmaaddrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+ done:
+       kfree(chunk->dmaaddrs);
+       chunk->dmaaddrs = NULL;
+       vfree(chunk->vaddr);
+       chunk->vaddr = NULL;
+       chunk->type = 0;
+}
+
+/**
+ * i40iw_get_vmalloc_mem - get 2M page for sd
+ * @hw: hardware address
+ * @chunk: chunk to add
+ * @pg_cnt: number of 4K pages
+ */
+static enum i40iw_status_code i40iw_get_vmalloc_mem(struct i40iw_hw *hw,
+                                                   struct i40iw_chunk *chunk,
+                                                   int pg_cnt)
+{
+       struct pci_dev *pcidev = (struct pci_dev *)hw->dev_context;
+       struct page *page;
+       u8 *addr;
+       u32 size;
+       int i;
+
+       chunk->dmaaddrs = kzalloc(pg_cnt << 3, GFP_KERNEL);
+       if (!chunk->dmaaddrs)
+               return I40IW_ERR_NO_MEMORY;
+       size = PAGE_SIZE * pg_cnt;
+       chunk->vaddr = vmalloc(size);
+       if (!chunk->vaddr) {
+               kfree(chunk->dmaaddrs);
+               chunk->dmaaddrs = NULL;
+               return I40IW_ERR_NO_MEMORY;
+       }
+       chunk->size = size;
+       addr = (u8 *)chunk->vaddr;
+       for (i = 0; i < pg_cnt; i++) {
+               page = vmalloc_to_page((void *)addr);
+               if (!page)
+                       break;
+               chunk->dmaaddrs[i] = dma_map_page(&pcidev->dev, page, 0,
+                                                 PAGE_SIZE, DMA_BIDIRECTIONAL);
+               if (dma_mapping_error(&pcidev->dev, chunk->dmaaddrs[i]))
+                       break;
+               addr += PAGE_SIZE;
+       }
+
+       chunk->pg_cnt = i;
+       chunk->type = I40IW_VMALLOC;
+       if (i == pg_cnt)
+               return 0;
+
+       i40iw_free_vmalloc_mem(hw, chunk);
+       return I40IW_ERR_NO_MEMORY;
+}
+
+/**
+ * fpm_to_idx - given fpm address, get pble index
+ * @pble_rsrc: pble resource management
+ * @addr: fpm address for index
+ */
+static inline u32 fpm_to_idx(struct i40iw_hmc_pble_rsrc *pble_rsrc, u64 addr)
+{
+       return (addr - (pble_rsrc->fpm_base_addr)) >> 3;
+}
+
+/**
+ * add_bp_pages - add backing pages for sd
+ * @dev: hardware control device structure
+ * @pble_rsrc: pble resource management
+ * @info: page info for sd
+ */
+static enum i40iw_status_code add_bp_pages(struct i40iw_sc_dev *dev,
+                                          struct i40iw_hmc_pble_rsrc *pble_rsrc,
+                                          struct i40iw_add_page_info *info)
+{
+       u8 *addr;
+       struct i40iw_dma_mem mem;
+       struct i40iw_hmc_pd_entry *pd_entry;
+       struct i40iw_hmc_sd_entry *sd_entry = info->sd_entry;
+       struct i40iw_hmc_info *hmc_info = info->hmc_info;
+       struct i40iw_chunk *chunk = info->chunk;
+       struct i40iw_manage_vf_pble_info vf_pble_info;
+       enum i40iw_status_code status = 0;
+       u32 rel_pd_idx = info->idx.rel_pd_idx;
+       u32 pd_idx = info->idx.pd_idx;
+       u32 i;
+
+       status = i40iw_get_vmalloc_mem(dev->hw, chunk, info->pages);
+       if (status)
+               return I40IW_ERR_NO_MEMORY;
+       status = i40iw_add_sd_table_entry(dev->hw, hmc_info,
+                                         info->idx.sd_idx, I40IW_SD_TYPE_PAGED,
+                                         I40IW_HMC_DIRECT_BP_SIZE);
+       if (status) {
+               i40iw_free_vmalloc_mem(dev->hw, chunk);
+               return status;
+       }
+       if (!dev->is_pf) {
+               status = i40iw_vchnl_vf_add_hmc_objs(dev, I40IW_HMC_IW_PBLE,
+                                                    fpm_to_idx(pble_rsrc,
+                                                               pble_rsrc->next_fpm_addr),
+                                                    (info->pages << PBLE_512_SHIFT));
+               if (status) {
+                       i40iw_pr_err("could not allocate PBLEs in the PF, error %i\n", status);
+                       i40iw_free_vmalloc_mem(dev->hw, chunk);
+                       return status;
+               }
+       }
+       addr = chunk->vaddr;
+       for (i = 0; i < info->pages; i++) {
+               mem.pa = chunk->dmaaddrs[i];
+               mem.size = PAGE_SIZE;
+               mem.va = (void *)(addr);
+               pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx++];
+               if (!pd_entry->valid) {
+                       status = i40iw_add_pd_table_entry(dev->hw, hmc_info, pd_idx++, &mem);
+                       if (status)
+                               goto error;
+                       addr += PAGE_SIZE;
+               } else {
+                       i40iw_pr_err("pd entry is valid expecting to be invalid\n");
+               }
+       }
+       if (!dev->is_pf) {
+               vf_pble_info.first_pd_index = info->idx.rel_pd_idx;
+               vf_pble_info.inv_pd_ent = false;
+               vf_pble_info.pd_entry_cnt = PBLE_PER_PAGE;
+               vf_pble_info.pd_pl_pba = sd_entry->u.pd_table.pd_page_addr.pa;
+               vf_pble_info.sd_index = info->idx.sd_idx;
+               status = i40iw_hw_manage_vf_pble_bp(dev->back_dev,
+                                                   &vf_pble_info, true);
+               if (status) {
+                       i40iw_pr_err("CQP manage VF PBLE BP failed.  %i\n", status);
+                       goto error;
+               }
+       }
+       chunk->fpm_addr = pble_rsrc->next_fpm_addr;
+       return 0;
+error:
+       i40iw_free_vmalloc_mem(dev->hw, chunk);
+       return status;
+}
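+
+/*
+ * Sizing note for the VF path above: info->pages << PBLE_512_SHIFT is
+ * pages * 512, matching PBLE_PER_PAGE, so the object count handed to the PF
+ * is the number of 8-byte pble entries being backed (an inference from the
+ * shifts used in this file, not from a hardware datasheet).
+ */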
+
+/**
+ * add_pble_pool - add an sd entry for pble resource
+ * @dev: hardware control device structure
+ * @pble_rsrc: pble resource management
+ */
+static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
+                                           struct i40iw_hmc_pble_rsrc *pble_rsrc)
+{
+       struct i40iw_hmc_sd_entry *sd_entry;
+       struct i40iw_hmc_info *hmc_info;
+       struct i40iw_chunk *chunk;
+       struct i40iw_add_page_info info;
+       struct sd_pd_idx *idx = &info.idx;
+       enum i40iw_status_code ret_code = 0;
+       enum i40iw_sd_entry_type sd_entry_type;
+       u64 sd_reg_val = 0;
+       u32 pages;
+
+       if (pble_rsrc->unallocated_pble < PBLE_PER_PAGE)
+               return I40IW_ERR_NO_MEMORY;
+       if (pble_rsrc->next_fpm_addr & 0xfff) {
+               i40iw_pr_err("next fpm_addr %llx\n", pble_rsrc->next_fpm_addr);
+               return I40IW_ERR_INVALID_PAGE_DESC_INDEX;
+       }
+       chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
+       if (!chunk)
+               return I40IW_ERR_NO_MEMORY;
+       hmc_info = dev->hmc_info;
+       chunk->fpm_addr = pble_rsrc->next_fpm_addr;
+       get_sd_pd_idx(pble_rsrc, idx);
+       sd_entry = &hmc_info->sd_table.sd_entry[idx->sd_idx];
+       pages = (idx->rel_pd_idx) ? (I40IW_HMC_PD_CNT_IN_SD -
+                       idx->rel_pd_idx) : I40IW_HMC_PD_CNT_IN_SD;
+       pages = min(pages, pble_rsrc->unallocated_pble >> PBLE_512_SHIFT);
+       if (!pages) {
+               ret_code = I40IW_ERR_NO_PBLCHUNKS_AVAILABLE;
+               goto error;
+       }
+       info.chunk = chunk;
+       info.hmc_info = hmc_info;
+       info.pages = pages;
+       info.sd_entry = sd_entry;
+       if (!sd_entry->valid) {
+               sd_entry_type = (!idx->rel_pd_idx &&
+                                (pages == I40IW_HMC_PD_CNT_IN_SD) &&
+                                dev->is_pf) ? I40IW_SD_TYPE_DIRECT : I40IW_SD_TYPE_PAGED;
+       } else {
+               sd_entry_type = sd_entry->entry_type;
+       }
+       i40iw_debug(dev, I40IW_DEBUG_PBLE,
+                   "pages = %d, unallocated_pble[%u] current_fpm_addr = %llx\n",
+                   pages, pble_rsrc->unallocated_pble, pble_rsrc->next_fpm_addr);
+       i40iw_debug(dev, I40IW_DEBUG_PBLE, "sd_entry_type = %d sd_entry valid = %d\n",
+                   sd_entry_type, sd_entry->valid);
+
+       if (sd_entry_type == I40IW_SD_TYPE_DIRECT)
+               ret_code = add_sd_direct(dev, pble_rsrc, &info);
+       if (ret_code)
+               sd_entry_type = I40IW_SD_TYPE_PAGED;
+       else
+               pble_rsrc->stats_direct_sds++;
+
+       if (sd_entry_type == I40IW_SD_TYPE_PAGED) {
+               ret_code = add_bp_pages(dev, pble_rsrc, &info);
+               if (ret_code)
+                       goto error;
+               else
+                       pble_rsrc->stats_paged_sds++;
+       }
+
+       if (gen_pool_add_virt(pble_rsrc->pinfo.pool, (unsigned long)chunk->vaddr,
+                             (phys_addr_t)chunk->fpm_addr, chunk->size, -1)) {
+               i40iw_pr_err("could not add memory chunk via gen_pool_add_virt()\n");
+               ret_code = I40IW_ERR_NO_MEMORY;
+               goto error;
+       }
+       pble_rsrc->next_fpm_addr += chunk->size;
+       i40iw_debug(dev, I40IW_DEBUG_PBLE, "next_fpm_addr = %llx chunk_size[%u] = 0x%x\n",
+                   pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
+       pble_rsrc->unallocated_pble -= (chunk->size >> 3);
+       list_add(&chunk->list, &pble_rsrc->pinfo.clist);
+       sd_reg_val = (sd_entry_type == I40IW_SD_TYPE_PAGED) ?
+                       sd_entry->u.pd_table.pd_page_addr.pa : sd_entry->u.bp.addr.pa;
+       if (sd_entry->valid)
+               return 0;
+       if (dev->is_pf)
+               ret_code = i40iw_hmc_sd_one(dev, hmc_info->hmc_fn_id,
+                                           sd_reg_val, idx->sd_idx,
+                                           sd_entry->entry_type, true);
+       if (ret_code) {
+               i40iw_pr_err("cqp cmd failed for sd (pbles)\n");
+               goto error;
+       }
+
+       sd_entry->valid = true;
+       return 0;
+ error:
+       kfree(chunk);
+       return ret_code;
+}
+
+/**
+ * free_lvl2 - free level 2 pble
+ * @pble_rsrc: pble resource management
+ * @palloc: level 2 pble allocation
+ */
+static void free_lvl2(struct i40iw_hmc_pble_rsrc *pble_rsrc,
+                     struct i40iw_pble_alloc *palloc)
+{
+       u32 i;
+       struct gen_pool *pool;
+       struct i40iw_pble_level2 *lvl2 = &palloc->level2;
+       struct i40iw_pble_info *root = &lvl2->root;
+       struct i40iw_pble_info *leaf = lvl2->leaf;
+
+       pool = pble_rsrc->pinfo.pool;
+
+       for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
+               if (leaf->addr)
+                       gen_pool_free(pool, leaf->addr, (leaf->cnt << 3));
+               else
+                       break;
+       }
+
+       if (root->addr)
+               gen_pool_free(pool, root->addr, (root->cnt << 3));
+
+       kfree(lvl2->leaf);
+       lvl2->leaf = NULL;
+}
+
+/**
+ * get_lvl2_pble - get level 2 pble resource
+ * @pble_rsrc: pble resource management
+ * @palloc: level 2 pble allocation
+ * @pool: pool pointer
+ */
+static enum i40iw_status_code get_lvl2_pble(struct i40iw_hmc_pble_rsrc *pble_rsrc,
+                                           struct i40iw_pble_alloc *palloc,
+                                           struct gen_pool *pool)
+{
+       u32 lf4k, lflast, total, i;
+       u32 pblcnt = PBLE_PER_PAGE;
+       u64 *addr;
+       struct i40iw_pble_level2 *lvl2 = &palloc->level2;
+       struct i40iw_pble_info *root = &lvl2->root;
+       struct i40iw_pble_info *leaf;
+
+       /* number of full 512-entry (4K) leaves */
+       lf4k = palloc->total_cnt >> 9;
+       lflast = palloc->total_cnt % PBLE_PER_PAGE;
+       total = (lflast == 0) ? lf4k : lf4k + 1;
+       lvl2->leaf_cnt = total;
+
+       leaf = kzalloc((sizeof(*leaf) * total), GFP_ATOMIC);
+       if (!leaf)
+               return I40IW_ERR_NO_MEMORY;
+       lvl2->leaf = leaf;
+       /* allocate pbles for the root */
+       root->addr = gen_pool_alloc(pool, (total << 3));
+       if (!root->addr) {
+               kfree(lvl2->leaf);
+               lvl2->leaf = NULL;
+               return I40IW_ERR_NO_MEMORY;
+       }
+       root->idx = fpm_to_idx(pble_rsrc,
+                              (u64)gen_pool_virt_to_phys(pool, root->addr));
+       root->cnt = total;
+       addr = (u64 *)root->addr;
+       for (i = 0; i < total; i++, leaf++) {
+               pblcnt = (lflast && ((i + 1) == total)) ? lflast : PBLE_PER_PAGE;
+               leaf->addr = gen_pool_alloc(pool, (pblcnt << 3));
+               if (!leaf->addr)
+                       goto error;
+               leaf->idx = fpm_to_idx(pble_rsrc, (u64)gen_pool_virt_to_phys(pool, leaf->addr));
+
+               leaf->cnt = pblcnt;
+               *addr = (u64)leaf->idx;
+               addr++;
+       }
+       palloc->level = I40IW_LEVEL_2;
+       pble_rsrc->stats_lvl2++;
+       return 0;
+ error:
+       free_lvl2(pble_rsrc, palloc);
+       return I40IW_ERR_NO_MEMORY;
+}
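+
+/*
+ * Shape of a level 2 allocation, as built above: the root is one run of
+ * 'total' 8-byte slots, one per leaf, each holding that leaf's pble index.
+ * Every leaf except possibly the last covers PBLE_PER_PAGE (512) entries,
+ * so total is total_cnt >> 9 full leaves plus one partial leaf when
+ * total_cnt is not a multiple of 512; e.g. total_cnt = 1000 gives one full
+ * leaf of 512 entries and a second leaf of 488.
+ */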
+
+/**
+ * get_lvl1_pble - get level 1 pble resource
+ * @dev: hardware control device structure
+ * @pble_rsrc: pble resource management
+ * @palloc: level 1 pble allocation
+ */
+static enum i40iw_status_code get_lvl1_pble(struct i40iw_sc_dev *dev,
+                                           struct i40iw_hmc_pble_rsrc *pble_rsrc,
+                                           struct i40iw_pble_alloc *palloc)
+{
+       u64 *addr;
+       struct gen_pool *pool;
+       struct i40iw_pble_info *lvl1 = &palloc->level1;
+
+       pool = pble_rsrc->pinfo.pool;
+       addr = (u64 *)gen_pool_alloc(pool, (palloc->total_cnt << 3));
+
+       if (!addr)
+               return I40IW_ERR_NO_MEMORY;
+
+       palloc->level = I40IW_LEVEL_1;
+       lvl1->addr = (unsigned long)addr;
+       lvl1->idx = fpm_to_idx(pble_rsrc, (u64)gen_pool_virt_to_phys(pool,
+                              (unsigned long)addr));
+       lvl1->cnt = palloc->total_cnt;
+       pble_rsrc->stats_lvl1++;
+       return 0;
+}
+
+/**
+ * get_lvl1_lvl2_pble - calls get_lvl1 and get_lvl2 pble routine
+ * @dev: i40iw_sc_dev struct
+ * @pble_rsrc: pble resources
+ * @palloc: contains all information regarding pble (idx + pble addr)
+ * @pool: pointer to general purpose special memory pool descriptor
+ */
+static inline enum i40iw_status_code get_lvl1_lvl2_pble(struct i40iw_sc_dev *dev,
+                                                       struct i40iw_hmc_pble_rsrc *pble_rsrc,
+                                                       struct i40iw_pble_alloc *palloc,
+                                                       struct gen_pool *pool)
+{
+       enum i40iw_status_code status = 0;
+
+       status = get_lvl1_pble(dev, pble_rsrc, palloc);
+       if (status && (palloc->total_cnt > PBLE_PER_PAGE))
+               status = get_lvl2_pble(pble_rsrc, palloc, pool);
+       return status;
+}
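+
+/*
+ * The fallback above is deliberate: a level 1 allocation needs total_cnt
+ * contiguous pble slots in the pool, so when that fails and the request is
+ * larger than one page of pbles (PBLE_PER_PAGE), the request is retried as
+ * a level 2 allocation that can be satisfied from scattered pages.
+ */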
+
+/**
+ * i40iw_get_pble - allocate pbles from the pool
+ * @dev: i40iw_sc_dev struct
+ * @pble_rsrc: pble resources
+ * @palloc: contains all information regarding pble (idx + pble addr)
+ * @pble_cnt: number of pbles requested
+ */
+enum i40iw_status_code i40iw_get_pble(struct i40iw_sc_dev *dev,
+                                     struct i40iw_hmc_pble_rsrc *pble_rsrc,
+                                     struct i40iw_pble_alloc *palloc,
+                                     u32 pble_cnt)
+{
+       struct gen_pool *pool;
+       enum i40iw_status_code status = 0;
+       u32 max_sds = 0;
+       int i;
+
+       pool = pble_rsrc->pinfo.pool;
+       palloc->total_cnt = pble_cnt;
+       palloc->level = I40IW_LEVEL_0;
+       /* check first to see if we can get pbles without acquiring additional sds */
+       status = get_lvl1_lvl2_pble(dev, pble_rsrc, palloc, pool);
+       if (!status)
+               goto exit;
+       max_sds = (palloc->total_cnt >> 18) + 1;
+       for (i = 0; i < max_sds; i++) {
+               status = add_pble_pool(dev, pble_rsrc);
+               if (status)
+                       break;
+               status = get_lvl1_lvl2_pble(dev, pble_rsrc, palloc, pool);
+               if (!status)
+                       break;
+       }
+exit:
+       if (!status)
+               pble_rsrc->stats_alloc_ok++;
+       else
+               pble_rsrc->stats_alloc_fail++;
+
+       return status;
+}
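+
+/*
+ * On the max_sds bound above: assuming 512 page descriptors per sd and 512
+ * pbles per page (consistent with the >> 18, since 512 * 512 = 2^18), one
+ * fully backed sd supplies at most 2^18 pbles, so (total_cnt >> 18) + 1
+ * calls to add_pble_pool() are enough to cover any single request.
+ */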
+
+/**
+ * i40iw_free_pble - put pbles back into pool
+ * @pble_rsrc: pble resources
+ * @palloc: contains all information regarding pble resource being freed
+ */
+void i40iw_free_pble(struct i40iw_hmc_pble_rsrc *pble_rsrc,
+                    struct i40iw_pble_alloc *palloc)
+{
+       struct gen_pool *pool;
+
+       pool = pble_rsrc->pinfo.pool;
+       if (palloc->level == I40IW_LEVEL_2)
+               free_lvl2(pble_rsrc, palloc);
+       else
+               gen_pool_free(pool, palloc->level1.addr,
+                             (palloc->level1.cnt << 3));
+       pble_rsrc->stats_alloc_freed++;
+}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_pble.h b/drivers/infiniband/hw/i40iw/i40iw_pble.h
new file mode 100644 (file)
index 0000000..7b1851d
--- /dev/null
@@ -0,0 +1,131 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses.  You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+*   Redistribution and use in source and binary forms, with or
+*   without modification, are permitted provided that the following
+*   conditions are met:
+*
+*    - Redistributions of source code must retain the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer.
+*
+*    - Redistributions in binary form must reproduce the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer in the documentation and/or other materials
+*      provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_PBLE_H
+#define I40IW_PBLE_H
+
+#define POOL_SHIFT      6
+#define PBLE_PER_PAGE   512
+#define I40IW_HMC_PAGED_BP_SHIFT 12
+#define PBLE_512_SHIFT  9
+
+enum i40iw_pble_level {
+       I40IW_LEVEL_0 = 0,
+       I40IW_LEVEL_1 = 1,
+       I40IW_LEVEL_2 = 2
+};
+
+enum i40iw_alloc_type {
+       I40IW_NO_ALLOC = 0,
+       I40IW_DMA_COHERENT = 1,
+       I40IW_VMALLOC = 2
+};
+
+struct i40iw_pble_info {
+       unsigned long addr;
+       u32 idx;
+       u32 cnt;
+};
+
+struct i40iw_pble_level2 {
+       struct i40iw_pble_info root;
+       struct i40iw_pble_info *leaf;
+       u32 leaf_cnt;
+};
+
+struct i40iw_pble_alloc {
+       u32 total_cnt;
+       enum i40iw_pble_level level;
+       union {
+               struct i40iw_pble_info level1;
+               struct i40iw_pble_level2 level2;
+       };
+};
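+
+/*
+ * Which union member of i40iw_pble_alloc is meaningful follows the level
+ * field: level1 for I40IW_LEVEL_1, level2 for I40IW_LEVEL_2, and neither
+ * for I40IW_LEVEL_0 (nothing allocated yet), mirroring how i40iw_free_pble()
+ * chooses its free path.
+ */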
+
+struct sd_pd_idx {
+       u32 sd_idx;
+       u32 pd_idx;
+       u32 rel_pd_idx;
+};
+
+struct i40iw_add_page_info {
+       struct i40iw_chunk *chunk;
+       struct i40iw_hmc_sd_entry *sd_entry;
+       struct i40iw_hmc_info *hmc_info;
+       struct sd_pd_idx idx;
+       u32 pages;
+};
+
+struct i40iw_chunk {
+       struct list_head list;
+       u32 size;
+       void *vaddr;
+       u64 fpm_addr;
+       u32 pg_cnt;
+       dma_addr_t *dmaaddrs;
+       enum i40iw_alloc_type type;
+};
+
+struct i40iw_pble_pool {
+       struct gen_pool *pool;
+       struct list_head clist;
+       u32 total_pble_alloc;
+       u32 free_pble_cnt;
+       u32 pool_shift;
+};
+
+struct i40iw_hmc_pble_rsrc {
+       u32 unallocated_pble;
+       u64 fpm_base_addr;
+       u64 next_fpm_addr;
+       struct i40iw_pble_pool pinfo;
+
+       u32 stats_direct_sds;
+       u32 stats_paged_sds;
+       u64 stats_alloc_ok;
+       u64 stats_alloc_fail;
+       u64 stats_alloc_freed;
+       u64 stats_lvl1;
+       u64 stats_lvl2;
+};
+
+void i40iw_destroy_pble_pool(struct i40iw_sc_dev *dev, struct i40iw_hmc_pble_rsrc *pble_rsrc);
+enum i40iw_status_code i40iw_hmc_init_pble(struct i40iw_sc_dev *dev,
+                                          struct i40iw_hmc_pble_rsrc *pble_rsrc);
+void i40iw_free_pble(struct i40iw_hmc_pble_rsrc *pble_rsrc, struct i40iw_pble_alloc *palloc);
+enum i40iw_status_code i40iw_get_pble(struct i40iw_sc_dev *dev,
+                                     struct i40iw_hmc_pble_rsrc *pble_rsrc,
+                                     struct i40iw_pble_alloc *palloc,
+                                     u32 pble_cnt);
+#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
new file mode 100644 (file)
index 0000000..8eb400d
--- /dev/null
@@ -0,0 +1,1436 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses.  You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+*   Redistribution and use in source and binary forms, with or
+*   without modification, are permitted provided that the following
+*   conditions are met:
+*
+*    - Redistributions of source code must retain the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer.
+*
+*    - Redistributions in binary form must reproduce the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer in the documentation and/or other materials
+*      provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#include "i40iw_osdep.h"
+#include "i40iw_register.h"
+#include "i40iw_status.h"
+#include "i40iw_hmc.h"
+
+#include "i40iw_d.h"
+#include "i40iw_type.h"
+#include "i40iw_p.h"
+#include "i40iw_puda.h"
+
+static void i40iw_ieq_receive(struct i40iw_sc_dev *dev,
+                             struct i40iw_puda_buf *buf);
+static void i40iw_ieq_tx_compl(struct i40iw_sc_dev *dev, void *sqwrid);
+static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx);
+static enum i40iw_status_code i40iw_puda_replenish_rq(struct i40iw_puda_rsrc
+                                                     *rsrc, bool initial);
+/**
+ * i40iw_puda_get_listbuf - get buffer from puda list
+ * @list: list to use for buffers (ILQ or IEQ)
+ */
+static struct i40iw_puda_buf *i40iw_puda_get_listbuf(struct list_head *list)
+{
+       struct i40iw_puda_buf *buf = NULL;
+
+       if (!list_empty(list)) {
+               buf = (struct i40iw_puda_buf *)list->next;
+               list_del((struct list_head *)&buf->list);
+       }
+       return buf;
+}
+
+/**
+ * i40iw_puda_get_bufpool - return buffer from resource
+ * @rsrc: resource to use for buffer
+ */
+struct i40iw_puda_buf *i40iw_puda_get_bufpool(struct i40iw_puda_rsrc *rsrc)
+{
+       struct i40iw_puda_buf *buf = NULL;
+       struct list_head *list = &rsrc->bufpool;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&rsrc->bufpool_lock, flags);
+       buf = i40iw_puda_get_listbuf(list);
+       if (buf)
+               rsrc->avail_buf_count--;
+       else
+               rsrc->stats_buf_alloc_fail++;
+       spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
+       return buf;
+}
+
+/**
+ * i40iw_puda_ret_bufpool - return buffer to rsrc list
+ * @rsrc: resource to use for buffer
+ * @buf: buffer to return to resource
+ */
+void i40iw_puda_ret_bufpool(struct i40iw_puda_rsrc *rsrc,
+                           struct i40iw_puda_buf *buf)
+{
+       unsigned long   flags;
+
+       spin_lock_irqsave(&rsrc->bufpool_lock, flags);
+       list_add(&buf->list, &rsrc->bufpool);
+       spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
+       rsrc->avail_buf_count++;
+}
+
+/**
+ * i40iw_puda_post_recvbuf - set wqe for rcv buffer
+ * @rsrc: resource ptr
+ * @wqe_idx: wqe index to use
+ * @buf: puda buffer for rcv q
+ * @initial: flag if during init time
+ */
+static void i40iw_puda_post_recvbuf(struct i40iw_puda_rsrc *rsrc, u32 wqe_idx,
+                                   struct i40iw_puda_buf *buf, bool initial)
+{
+       u64 *wqe;
+       struct i40iw_sc_qp *qp = &rsrc->qp;
+       u64 offset24 = 0;
+
+       qp->qp_uk.rq_wrid_array[wqe_idx] = (uintptr_t)buf;
+       wqe = qp->qp_uk.rq_base[wqe_idx].elem;
+       i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
+                   "%s: wqe_idx= %d buf = %p wqe = %p\n", __func__,
+                   wqe_idx, buf, wqe);
+       if (!initial)
+               get_64bit_val(wqe, 24, &offset24);
+
+       offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID);
+       set_64bit_val(wqe, 24, offset24);
+
+       set_64bit_val(wqe, 0, buf->mem.pa);
+       set_64bit_val(wqe, 8,
+                     LS_64(buf->mem.size, I40IWQPSQ_FRAG_LEN));
+       set_64bit_val(wqe, 24, offset24);
+}
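+
+/*
+ * Note on offset24 above: qword 24 of the rq wqe carries the valid bit.  On
+ * the initial pass the bit is simply set; on a repost the current value is
+ * read back and inverted, so a descriptor's valid polarity flips each time
+ * it is reused - the usual way of telling fresh entries from stale ones
+ * after the ring wraps.
+ */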
+
+/**
+ * i40iw_puda_replenish_rq - post rcv buffers
+ * @rsrc: resource to use for buffer
+ * @initial: flag if during init time
+ */
+static enum i40iw_status_code i40iw_puda_replenish_rq(struct i40iw_puda_rsrc *rsrc,
+                                                     bool initial)
+{
+       u32 i;
+       u32 invalid_cnt = rsrc->rxq_invalid_cnt;
+       struct i40iw_puda_buf *buf = NULL;
+
+       for (i = 0; i < invalid_cnt; i++) {
+               buf = i40iw_puda_get_bufpool(rsrc);
+               if (!buf)
+                       return I40IW_ERR_list_empty;
+               i40iw_puda_post_recvbuf(rsrc, rsrc->rx_wqe_idx, buf,
+                                       initial);
+               rsrc->rx_wqe_idx =
+                   ((rsrc->rx_wqe_idx + 1) % rsrc->rq_size);
+               rsrc->rxq_invalid_cnt--;
+       }
+       return 0;
+}
+
+/**
+ * i40iw_puda_alloc_buf - allocate mem for buffer
+ * @dev: iwarp device
+ * @length: length of buffer
+ */
+static struct i40iw_puda_buf *i40iw_puda_alloc_buf(struct i40iw_sc_dev *dev,
+                                                  u32 length)
+{
+       struct i40iw_puda_buf *buf = NULL;
+       struct i40iw_virt_mem buf_mem;
+       enum i40iw_status_code ret;
+
+       ret = i40iw_allocate_virt_mem(dev->hw, &buf_mem,
+                                     sizeof(struct i40iw_puda_buf));
+       if (ret) {
+               i40iw_debug(dev, I40IW_DEBUG_PUDA,
+                           "%s: error mem for buf\n", __func__);
+               return NULL;
+       }
+       buf = (struct i40iw_puda_buf *)buf_mem.va;
+       ret = i40iw_allocate_dma_mem(dev->hw, &buf->mem, length, 1);
+       if (ret) {
+               i40iw_debug(dev, I40IW_DEBUG_PUDA,
+                           "%s: error dma mem for buf\n", __func__);
+               i40iw_free_virt_mem(dev->hw, &buf_mem);
+               return NULL;
+       }
+       buf->buf_mem.va = buf_mem.va;
+       buf->buf_mem.size = buf_mem.size;
+       return buf;
+}
+
+/**
+ * i40iw_puda_dele_buf - delete buffer back to system
+ * @dev: iwarp device
+ * @buf: buffer to free
+ */
+static void i40iw_puda_dele_buf(struct i40iw_sc_dev *dev,
+                               struct i40iw_puda_buf *buf)
+{
+       i40iw_free_dma_mem(dev->hw, &buf->mem);
+       i40iw_free_virt_mem(dev->hw, &buf->buf_mem);
+}
+
+/**
+ * i40iw_puda_get_next_send_wqe - return next wqe for processing
+ * @qp: puda qp for wqe
+ * @wqe_idx: wqe index for caller
+ */
+static u64 *i40iw_puda_get_next_send_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx)
+{
+       u64 *wqe = NULL;
+       enum i40iw_status_code ret_code = 0;
+
+       *wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
+       if (!*wqe_idx)
+               qp->swqe_polarity = !qp->swqe_polarity;
+       I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
+       if (ret_code)
+               return wqe;
+       wqe = qp->sq_base[*wqe_idx].elem;
+
+       return wqe;
+}
+
+/**
+ * i40iw_puda_poll_info - poll cq for completion
+ * @cq: cq for poll
+ * @info: info return for successful completion
+ */
+static enum i40iw_status_code i40iw_puda_poll_info(struct i40iw_sc_cq *cq,
+                                                  struct i40iw_puda_completion_info *info)
+{
+       u64 qword0, qword2, qword3;
+       u64 *cqe;
+       u64 comp_ctx;
+       bool valid_bit;
+       u32 major_err, minor_err;
+       bool error;
+
+       cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(&cq->cq_uk);
+       get_64bit_val(cqe, 24, &qword3);
+       valid_bit = (bool)RS_64(qword3, I40IW_CQ_VALID);
+
+       if (valid_bit != cq->cq_uk.polarity)
+               return I40IW_ERR_QUEUE_EMPTY;
+
+       i40iw_debug_buf(cq->dev, I40IW_DEBUG_PUDA, "PUDA CQE", cqe, 32);
+       error = (bool)RS_64(qword3, I40IW_CQ_ERROR);
+       if (error) {
+               i40iw_debug(cq->dev, I40IW_DEBUG_PUDA, "%s receive error\n", __func__);
+               major_err = (u32)(RS_64(qword3, I40IW_CQ_MAJERR));
+               minor_err = (u32)(RS_64(qword3, I40IW_CQ_MINERR));
+               info->compl_error = major_err << 16 | minor_err;
+               return I40IW_ERR_CQ_COMPL_ERROR;
+       }
+
+       get_64bit_val(cqe, 0, &qword0);
+       get_64bit_val(cqe, 16, &qword2);
+
+       info->q_type = (u8)RS_64(qword3, I40IW_CQ_SQ);
+       info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID);
+
+       get_64bit_val(cqe, 8, &comp_ctx);
+       info->qp = (struct i40iw_qp_uk *)(unsigned long)comp_ctx;
+       info->wqe_idx = (u32)RS_64(qword3, I40IW_CQ_WQEIDX);
+
+       if (info->q_type == I40IW_CQE_QTYPE_RQ) {
+               info->vlan_valid = (bool)RS_64(qword3, I40IW_VLAN_TAG_VALID);
+               info->l4proto = (u8)RS_64(qword2, I40IW_UDA_L4PROTO);
+               info->l3proto = (u8)RS_64(qword2, I40IW_UDA_L3PROTO);
+               info->payload_len = (u16)RS_64(qword0, I40IW_UDA_PAYLOADLEN);
+       }
+
+       return 0;
+}
+
+/**
+ * i40iw_puda_poll_completion - processes completion for cq
+ * @dev: iwarp device
+ * @cq: cq getting interrupt
+ * @compl_err: return any completion err
+ */
+enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev,
+                                                 struct i40iw_sc_cq *cq, u32 *compl_err)
+{
+       struct i40iw_qp_uk *qp;
+       struct i40iw_cq_uk *cq_uk = &cq->cq_uk;
+       struct i40iw_puda_completion_info info;
+       enum i40iw_status_code ret = 0;
+       struct i40iw_puda_buf *buf;
+       struct i40iw_puda_rsrc *rsrc;
+       void *sqwrid;
+       u8 cq_type = cq->cq_type;
+       unsigned long   flags;
+
+       if ((cq_type == I40IW_CQ_TYPE_ILQ) || (cq_type == I40IW_CQ_TYPE_IEQ)) {
+               rsrc = (cq_type == I40IW_CQ_TYPE_ILQ) ? dev->ilq : dev->ieq;
+       } else {
+               i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s qp_type error\n", __func__);
+               return I40IW_ERR_BAD_PTR;
+       }
+       memset(&info, 0, sizeof(info));
+       ret = i40iw_puda_poll_info(cq, &info);
+       *compl_err = info.compl_error;
+       if (ret == I40IW_ERR_QUEUE_EMPTY)
+               return ret;
+       if (ret)
+               goto done;
+
+       qp = info.qp;
+       if (!qp || !rsrc) {
+               ret = I40IW_ERR_BAD_PTR;
+               goto done;
+       }
+
+       if (qp->qp_id != rsrc->qp_id) {
+               ret = I40IW_ERR_BAD_PTR;
+               goto done;
+       }
+
+       if (info.q_type == I40IW_CQE_QTYPE_RQ) {
+               buf = (struct i40iw_puda_buf *)(uintptr_t)qp->rq_wrid_array[info.wqe_idx];
+               /* Get all the tcpip information in the buf header */
+               ret = i40iw_puda_get_tcpip_info(&info, buf);
+               if (ret) {
+                       rsrc->stats_rcvd_pkt_err++;
+                       if (cq_type == I40IW_CQ_TYPE_ILQ) {
+                               i40iw_ilq_putback_rcvbuf(&rsrc->qp,
+                                                        info.wqe_idx);
+                       } else {
+                               i40iw_puda_ret_bufpool(rsrc, buf);
+                               i40iw_puda_replenish_rq(rsrc, false);
+                       }
+                       goto done;
+               }
+
+               rsrc->stats_pkt_rcvd++;
+               rsrc->compl_rxwqe_idx = info.wqe_idx;
+               i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s RQ completion\n", __func__);
+               rsrc->receive(rsrc->dev, buf);
+               if (cq_type == I40IW_CQ_TYPE_ILQ)
+                       i40iw_ilq_putback_rcvbuf(&rsrc->qp, info.wqe_idx);
+               else
+                       i40iw_puda_replenish_rq(rsrc, false);
+
+       } else {
+               i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s SQ completion\n", __func__);
+               sqwrid = (void *)(uintptr_t)qp->sq_wrtrk_array[info.wqe_idx].wrid;
+               I40IW_RING_SET_TAIL(qp->sq_ring, info.wqe_idx);
+               rsrc->xmit_complete(rsrc->dev, sqwrid);
+               spin_lock_irqsave(&rsrc->bufpool_lock, flags);
+               rsrc->tx_wqe_avail_cnt++;
+               spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
+               if (!list_empty(&dev->ilq->txpend))
+                       i40iw_puda_send_buf(dev->ilq, NULL);
+       }
+
+done:
+       I40IW_RING_MOVE_HEAD(cq_uk->cq_ring, ret);
+       if (I40IW_RING_GETCURRENT_HEAD(cq_uk->cq_ring) == 0)
+               cq_uk->polarity = !cq_uk->polarity;
+       /* update cq tail in cq shadow memory also */
+       I40IW_RING_MOVE_TAIL(cq_uk->cq_ring);
+       set_64bit_val(cq_uk->shadow_area, 0,
+                     I40IW_RING_GETCURRENT_HEAD(cq_uk->cq_ring));
+       return 0;
+}
+
+/**
+ * i40iw_puda_send - complete send wqe for transmit
+ * @qp: puda qp for send
+ * @info: buffer information for transmit
+ */
+enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp,
+                                      struct i40iw_puda_send_info *info)
+{
+       u64 *wqe;
+       u32 iplen, l4len;
+       u64 header[2];
+       u32 wqe_idx;
+       u8 iipt;
+
+       /* number of 32-bit words in the TCP header */
+       l4len = info->tcplen >> 2;
+       if (info->ipv4) {
+               iipt = 3;
+               iplen = 5;
+       } else {
+               iipt = 1;
+               iplen = 10;
+       }
+
+       wqe = i40iw_puda_get_next_send_wqe(&qp->qp_uk, &wqe_idx);
+       if (!wqe)
+               return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
+       qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid = (uintptr_t)info->scratch;
+       /* Third line of WQE descriptor */
+       /* maclen is in words */
+       header[0] = LS_64((info->maclen >> 1), I40IW_UDA_QPSQ_MACLEN) |
+                   LS_64(iplen, I40IW_UDA_QPSQ_IPLEN) | LS_64(1, I40IW_UDA_QPSQ_L4T) |
+                   LS_64(iipt, I40IW_UDA_QPSQ_IIPT) |
+                   LS_64(l4len, I40IW_UDA_QPSQ_L4LEN);
+       /* Forth line of WQE descriptor */
+       header[1] = LS_64(I40IW_OP_TYPE_SEND, I40IW_UDA_QPSQ_OPCODE) |
+                   LS_64(1, I40IW_UDA_QPSQ_SIGCOMPL) |
+                   LS_64(info->doloopback, I40IW_UDA_QPSQ_DOLOOPBACK) |
+                   LS_64(qp->qp_uk.swqe_polarity, I40IW_UDA_QPSQ_VALID);
+
+       set_64bit_val(wqe, 0, info->paddr);
+       set_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN));
+       set_64bit_val(wqe, 16, header[0]);
+       set_64bit_val(wqe, 24, header[1]);
+
+       i40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32);
+       i40iw_qp_post_wr(&qp->qp_uk);
+       return 0;
+}
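+
+/*
+ * The iplen/l4len values above are header lengths in 32-bit words: 5 words
+ * is a 20-byte IPv4 header without options, 10 words is the fixed 40-byte
+ * IPv6 header, and l4len = tcplen >> 2 converts the TCP header length from
+ * bytes to words.  The iipt values (3 for IPv4, 1 for IPv6) are read off
+ * this function, not from a register description.
+ */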
+
+/**
+ * i40iw_puda_send_buf - transmit puda buffer
+ * @rsrc: resource to use for buffer
+ * @buf: puda buffer to transmit
+ */
+void i40iw_puda_send_buf(struct i40iw_puda_rsrc *rsrc, struct i40iw_puda_buf *buf)
+{
+       struct i40iw_puda_send_info info;
+       enum i40iw_status_code ret = 0;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&rsrc->bufpool_lock, flags);
+       /* if no wqe available or not from a completion and we have
+        * pending buffers, we must queue new buffer
+        */
+       if (!rsrc->tx_wqe_avail_cnt || (buf && !list_empty(&rsrc->txpend))) {
+               list_add_tail(&buf->list, &rsrc->txpend);
+               spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
+               rsrc->stats_sent_pkt_q++;
+               if (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ)
+                       i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
+                                   "%s: adding to txpend\n", __func__);
+               return;
+       }
+       rsrc->tx_wqe_avail_cnt--;
+       /* if we are coming from a completion and have pending buffers
+        * then Get one from pending list
+        */
+       if (!buf) {
+               buf = i40iw_puda_get_listbuf(&rsrc->txpend);
+               if (!buf)
+                       goto done;
+       }
+
+       info.scratch = (void *)buf;
+       info.paddr = buf->mem.pa;
+       info.len = buf->totallen;
+       info.tcplen = buf->tcphlen;
+       info.maclen = buf->maclen;
+       info.ipv4 = buf->ipv4;
+       info.doloopback = (rsrc->type == I40IW_PUDA_RSRC_TYPE_IEQ);
+
+       ret = i40iw_puda_send(&rsrc->qp, &info);
+       if (ret) {
+               rsrc->tx_wqe_avail_cnt++;
+               rsrc->stats_sent_pkt_q++;
+               list_add(&buf->list, &rsrc->txpend);
+               if (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ)
+                       i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
+                                   "%s: adding to puda_send\n", __func__);
+       } else {
+               rsrc->stats_pkt_sent++;
+       }
+done:
+       spin_unlock_irqrestore(&rsrc->bufpool_lock, flags);
+}
+
+/**
+ * i40iw_puda_qp_setctx - during init, set qp's context
+ * @rsrc: qp's resource
+ */
+static void i40iw_puda_qp_setctx(struct i40iw_puda_rsrc *rsrc)
+{
+       struct i40iw_sc_qp *qp = &rsrc->qp;
+       u64 *qp_ctx = qp->hw_host_ctx;
+
+       set_64bit_val(qp_ctx, 8, qp->sq_pa);
+       set_64bit_val(qp_ctx, 16, qp->rq_pa);
+
+       set_64bit_val(qp_ctx, 24,
+                     LS_64(qp->hw_rq_size, I40IWQPC_RQSIZE) |
+                     LS_64(qp->hw_sq_size, I40IWQPC_SQSIZE));
+
+       set_64bit_val(qp_ctx, 48, LS_64(1514, I40IWQPC_SNDMSS));
+       set_64bit_val(qp_ctx, 56, 0);
+       set_64bit_val(qp_ctx, 64, 1);
+
+       set_64bit_val(qp_ctx, 136,
+                     LS_64(rsrc->cq_id, I40IWQPC_TXCQNUM) |
+                     LS_64(rsrc->cq_id, I40IWQPC_RXCQNUM));
+
+       set_64bit_val(qp_ctx, 160, LS_64(1, I40IWQPC_PRIVEN));
+
+       set_64bit_val(qp_ctx, 168,
+                     LS_64((uintptr_t)qp, I40IWQPC_QPCOMPCTX));
+
+       set_64bit_val(qp_ctx, 176,
+                     LS_64(qp->sq_tph_val, I40IWQPC_SQTPHVAL) |
+                     LS_64(qp->rq_tph_val, I40IWQPC_RQTPHVAL) |
+                     LS_64(qp->qs_handle, I40IWQPC_QSHANDLE));
+
+       i40iw_debug_buf(rsrc->dev, I40IW_DEBUG_PUDA, "PUDA QP CONTEXT",
+                       qp_ctx, I40IW_QP_CTX_SIZE);
+}
+
+/**
+ * i40iw_puda_qp_wqe - setup wqe for qp create
+ * @rsrc: resource for qp
+ */
+static enum i40iw_status_code i40iw_puda_qp_wqe(struct i40iw_puda_rsrc *rsrc)
+{
+       struct i40iw_sc_qp *qp = &rsrc->qp;
+       struct i40iw_sc_dev *dev = rsrc->dev;
+       struct i40iw_sc_cqp *cqp;
+       u64 *wqe;
+       u64 header;
+       struct i40iw_ccq_cqe_info compl_info;
+       enum i40iw_status_code status = 0;
+
+       cqp = dev->cqp;
+       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0);
+       if (!wqe)
+               return I40IW_ERR_RING_FULL;
+
+       set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
+       set_64bit_val(wqe, 40, qp->shadow_area_pa);
+       header = qp->qp_uk.qp_id |
+                LS_64(I40IW_CQP_OP_CREATE_QP, I40IW_CQPSQ_OPCODE) |
+                LS_64(I40IW_QP_TYPE_UDA, I40IW_CQPSQ_QP_QPTYPE) |
+                LS_64(1, I40IW_CQPSQ_QP_CQNUMVALID) |
+                LS_64(2, I40IW_CQPSQ_QP_NEXTIWSTATE) |
+                LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+       set_64bit_val(wqe, 24, header);
+
+       i40iw_debug_buf(cqp->dev, I40IW_DEBUG_PUDA, "PUDA CQE", wqe, 32);
+       i40iw_sc_cqp_post_sq(cqp);
+       status = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
+                                                   I40IW_CQP_OP_CREATE_QP,
+                                                   &compl_info);
+       return status;
+}
+
+/**
+ * i40iw_puda_qp_create - create qp for resource
+ * @rsrc: resource to use for buffer
+ */
+static enum i40iw_status_code i40iw_puda_qp_create(struct i40iw_puda_rsrc *rsrc)
+{
+       struct i40iw_sc_qp *qp = &rsrc->qp;
+       struct i40iw_qp_uk *ukqp = &qp->qp_uk;
+       enum i40iw_status_code ret = 0;
+       u32 sq_size, rq_size, t_size;
+       struct i40iw_dma_mem *mem;
+
+       sq_size = rsrc->sq_size * I40IW_QP_WQE_MIN_SIZE;
+       rq_size = rsrc->rq_size * I40IW_QP_WQE_MIN_SIZE;
+       t_size = (sq_size + rq_size + (I40IW_SHADOW_AREA_SIZE << 3) +
+                 I40IW_QP_CTX_SIZE);
+       /* Get page aligned memory */
+       ret =
+           i40iw_allocate_dma_mem(rsrc->dev->hw, &rsrc->qpmem, t_size,
+                                  I40IW_HW_PAGE_SIZE);
+       if (ret) {
+               i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, "%s: error dma mem\n", __func__);
+               return ret;
+       }
+
+       mem = &rsrc->qpmem;
+       memset(mem->va, 0, t_size);
+       qp->hw_sq_size = i40iw_get_encoded_wqe_size(rsrc->sq_size, false);
+       qp->hw_rq_size = i40iw_get_encoded_wqe_size(rsrc->rq_size, false);
+       qp->pd = &rsrc->sc_pd;
+       qp->qp_type = I40IW_QP_TYPE_UDA;
+       qp->dev = rsrc->dev;
+       qp->back_qp = (void *)rsrc;
+       qp->sq_pa = mem->pa;
+       qp->rq_pa = qp->sq_pa + sq_size;
+       ukqp->sq_base = mem->va;
+       ukqp->rq_base = &ukqp->sq_base[rsrc->sq_size];
+       ukqp->shadow_area = ukqp->rq_base[rsrc->rq_size].elem;
+       qp->shadow_area_pa = qp->rq_pa + rq_size;
+       qp->hw_host_ctx = ukqp->shadow_area + I40IW_SHADOW_AREA_SIZE;
+       qp->hw_host_ctx_pa =
+               qp->shadow_area_pa + (I40IW_SHADOW_AREA_SIZE << 3);
+       ukqp->qp_id = rsrc->qp_id;
+       ukqp->sq_wrtrk_array = rsrc->sq_wrtrk_array;
+       ukqp->rq_wrid_array = rsrc->rq_wrid_array;
+
+       ukqp->sq_size = rsrc->sq_size;
+       ukqp->rq_size = rsrc->rq_size;
+
+       I40IW_RING_INIT(ukqp->sq_ring, ukqp->sq_size);
+       I40IW_RING_INIT(ukqp->initial_ring, ukqp->sq_size);
+       I40IW_RING_INIT(ukqp->rq_ring, ukqp->rq_size);
+
+       if (qp->pd->dev->is_pf)
+               ukqp->wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
+                                                   I40E_PFPE_WQEALLOC);
+       else
+               ukqp->wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
+                                                   I40E_VFPE_WQEALLOC1);
+
+       qp->qs_handle = qp->dev->qs_handle;
+       i40iw_puda_qp_setctx(rsrc);
+       ret = i40iw_puda_qp_wqe(rsrc);
+       if (ret)
+               i40iw_free_dma_mem(rsrc->dev->hw, &rsrc->qpmem);
+       return ret;
+}
+
+/**
+ * i40iw_puda_cq_create - create cq for resource
+ * @rsrc: resource for which cq to create
+ */
+static enum i40iw_status_code i40iw_puda_cq_create(struct i40iw_puda_rsrc *rsrc)
+{
+       struct i40iw_sc_dev *dev = rsrc->dev;
+       struct i40iw_sc_cq *cq = &rsrc->cq;
+       u64 *wqe;
+       struct i40iw_sc_cqp *cqp;
+       u64 header;
+       enum i40iw_status_code ret = 0;
+       u32 tsize, cqsize;
+       u32 shadow_read_threshold = 128;
+       struct i40iw_dma_mem *mem;
+       struct i40iw_ccq_cqe_info compl_info;
+       struct i40iw_cq_init_info info;
+       struct i40iw_cq_uk_init_info *init_info = &info.cq_uk_init_info;
+
+       cq->back_cq = (void *)rsrc;
+       cqsize = rsrc->cq_size * (sizeof(struct i40iw_cqe));
+       tsize = cqsize + sizeof(struct i40iw_cq_shadow_area);
+       ret = i40iw_allocate_dma_mem(dev->hw, &rsrc->cqmem, tsize,
+                                    I40IW_CQ0_ALIGNMENT_MASK);
+       if (ret)
+               return ret;
+
+       mem = &rsrc->cqmem;
+       memset(&info, 0, sizeof(info));
+       info.dev = dev;
+       info.type = (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ) ?
+                        I40IW_CQ_TYPE_ILQ : I40IW_CQ_TYPE_IEQ;
+       info.shadow_read_threshold = rsrc->cq_size >> 2;
+       info.ceq_id_valid = true;
+       info.cq_base_pa = mem->pa;
+       info.shadow_area_pa = mem->pa + cqsize;
+       init_info->cq_base = mem->va;
+       init_info->shadow_area = (u64 *)((u8 *)mem->va + cqsize);
+       init_info->cq_size = rsrc->cq_size;
+       init_info->cq_id = rsrc->cq_id;
+       ret = dev->iw_priv_cq_ops->cq_init(cq, &info);
+       if (ret)
+               goto error;
+       cqp = dev->cqp;
+       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0);
+       if (!wqe) {
+               ret = I40IW_ERR_RING_FULL;
+               goto error;
+       }
+
+       set_64bit_val(wqe, 0, rsrc->cq_size);
+       set_64bit_val(wqe, 8, RS_64_1(cq, 1));
+       set_64bit_val(wqe, 16, LS_64(shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
+       set_64bit_val(wqe, 32, cq->cq_pa);
+
+       set_64bit_val(wqe, 40, cq->shadow_area_pa);
+
+       header = rsrc->cq_id |
+           LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
+           LS_64(1, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
+           LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) |
+           LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) |
+           LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+       set_64bit_val(wqe, 24, header);
+
+       i40iw_debug_buf(dev, I40IW_DEBUG_PUDA, "PUDA CQE",
+                       wqe, I40IW_CQP_WQE_SIZE * 8);
+
+       i40iw_sc_cqp_post_sq(dev->cqp);
+       ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
+                                                I40IW_CQP_OP_CREATE_CQ,
+                                                &compl_info);
+
+error:
+       if (ret)
+               i40iw_free_dma_mem(dev->hw, &rsrc->cqmem);
+       return ret;
+}
+
+/**
+ * i40iw_puda_dele_resources - delete all resources during close
+ * @dev: iwarp device
+ * @type: type of resource to delete
+ * @reset: true if reset chip
+ */
+void i40iw_puda_dele_resources(struct i40iw_sc_dev *dev,
+                              enum puda_resource_type type,
+                              bool reset)
+{
+       struct i40iw_ccq_cqe_info compl_info;
+       struct i40iw_puda_rsrc *rsrc;
+       struct i40iw_puda_buf *buf = NULL;
+       struct i40iw_puda_buf *nextbuf = NULL;
+       struct i40iw_virt_mem *vmem;
+       enum i40iw_status_code ret;
+
+       switch (type) {
+       case I40IW_PUDA_RSRC_TYPE_ILQ:
+               rsrc = dev->ilq;
+               vmem = &dev->ilq_mem;
+               break;
+       case I40IW_PUDA_RSRC_TYPE_IEQ:
+               rsrc = dev->ieq;
+               vmem = &dev->ieq_mem;
+               break;
+       default:
+               i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s: error resource type = 0x%x\n",
+                           __func__, type);
+               return;
+       }
+
+       switch (rsrc->completion) {
+       case PUDA_HASH_CRC_COMPLETE:
+               i40iw_free_hash_desc(rsrc->hash_desc);
+               /* fallthrough */
+       case PUDA_QP_CREATED:
+               do {
+                       if (reset)
+                               break;
+                       ret = dev->iw_priv_qp_ops->qp_destroy(&rsrc->qp,
+                                                             0, false, true, true);
+                       if (ret)
+                               i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
+                                           "%s error ieq qp destroy\n",
+                                           __func__);
+
+                       ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
+                                                                I40IW_CQP_OP_DESTROY_QP,
+                                                                &compl_info);
+                       if (ret)
+                               i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
+                                           "%s error ieq qp destroy done\n",
+                                           __func__);
+               } while (0);
+
+               i40iw_free_dma_mem(dev->hw, &rsrc->qpmem);
+               /* fallthrough */
+       case PUDA_CQ_CREATED:
+               do {
+                       if (reset)
+                               break;
+                       ret = dev->iw_priv_cq_ops->cq_destroy(&rsrc->cq, 0, true);
+                       if (ret)
+                               i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
+                                           "%s error ieq cq destroy\n",
+                                           __func__);
+
+                       ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp,
+                                                                I40IW_CQP_OP_DESTROY_CQ,
+                                                                &compl_info);
+                       if (ret)
+                               i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA,
+                                           "%s error ieq qp destroy done\n",
+                                           __func__);
+               } while (0);
+
+               i40iw_free_dma_mem(dev->hw, &rsrc->cqmem);
+               break;
+       default:
+               i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, "%s error no resources\n", __func__);
+               break;
+       }
+       /* Free all allocated puda buffers for both tx and rx */
+       buf = rsrc->alloclist;
+       while (buf) {
+               nextbuf = buf->next;
+               i40iw_puda_dele_buf(dev, buf);
+               buf = nextbuf;
+               rsrc->alloc_buf_count--;
+       }
+       i40iw_free_virt_mem(dev->hw, vmem);
+}
+
+/**
+ * i40iw_puda_allocbufs - allocate buffers for resource
+ * @rsrc: resource for buffer allocation
+ * @count: number of buffers to create
+ */
+static enum i40iw_status_code i40iw_puda_allocbufs(struct i40iw_puda_rsrc *rsrc,
+                                                  u32 count)
+{
+       u32 i;
+       struct i40iw_puda_buf *buf;
+       struct i40iw_puda_buf *nextbuf;
+
+       for (i = 0; i < count; i++) {
+               buf = i40iw_puda_alloc_buf(rsrc->dev, rsrc->buf_size);
+               if (!buf) {
+                       rsrc->stats_buf_alloc_fail++;
+                       return I40IW_ERR_NO_MEMORY;
+               }
+               i40iw_puda_ret_bufpool(rsrc, buf);
+               rsrc->alloc_buf_count++;
+               if (!rsrc->alloclist) {
+                       rsrc->alloclist = buf;
+               } else {
+                       nextbuf = rsrc->alloclist;
+                       rsrc->alloclist = buf;
+                       buf->next = nextbuf;
+               }
+       }
+       rsrc->avail_buf_count = rsrc->alloc_buf_count;
+       return 0;
+}
+
+/**
+ * i40iw_puda_create_rsrc - create resource (ilq or ieq)
+ * @dev: iwarp device
+ * @info: resource information
+ */
+enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_dev *dev,
+                                             struct i40iw_puda_rsrc_info *info)
+{
+       enum i40iw_status_code ret = 0;
+       struct i40iw_puda_rsrc *rsrc;
+       u32 pudasize;
+       u32 sqwridsize, rqwridsize;
+       struct i40iw_virt_mem *vmem;
+
+       info->count = 1;
+       pudasize = sizeof(struct i40iw_puda_rsrc);
+       sqwridsize = info->sq_size * sizeof(struct i40iw_sq_uk_wr_trk_info);
+       rqwridsize = info->rq_size * 8;
+       switch (info->type) {
+       case I40IW_PUDA_RSRC_TYPE_ILQ:
+               vmem = &dev->ilq_mem;
+               break;
+       case I40IW_PUDA_RSRC_TYPE_IEQ:
+               vmem = &dev->ieq_mem;
+               break;
+       default:
+               return I40IW_NOT_SUPPORTED;
+       }
+       ret =
+           i40iw_allocate_virt_mem(dev->hw, vmem,
+                                   pudasize + sqwridsize + rqwridsize);
+       if (ret)
+               return ret;
+       rsrc = (struct i40iw_puda_rsrc *)vmem->va;
+       spin_lock_init(&rsrc->bufpool_lock);
+       if (info->type == I40IW_PUDA_RSRC_TYPE_ILQ) {
+               dev->ilq = (struct i40iw_puda_rsrc *)vmem->va;
+               dev->ilq_count = info->count;
+               rsrc->receive = info->receive;
+               rsrc->xmit_complete = info->xmit_complete;
+       } else {
+               vmem = &dev->ieq_mem;
+               dev->ieq_count = info->count;
+               dev->ieq = (struct i40iw_puda_rsrc *)vmem->va;
+               rsrc->receive = i40iw_ieq_receive;
+               rsrc->xmit_complete = i40iw_ieq_tx_compl;
+       }
+
+       rsrc->type = info->type;
+       rsrc->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)((u8 *)vmem->va + pudasize);
+       rsrc->rq_wrid_array = (u64 *)((u8 *)vmem->va + pudasize + sqwridsize);
+       rsrc->mss = info->mss;
+       /* Initialize all ieq lists */
+       INIT_LIST_HEAD(&rsrc->bufpool);
+       INIT_LIST_HEAD(&rsrc->txpend);
+
+       rsrc->tx_wqe_avail_cnt = info->sq_size - 1;
+       dev->iw_pd_ops->pd_init(dev, &rsrc->sc_pd, info->pd_id);
+       rsrc->qp_id = info->qp_id;
+       rsrc->cq_id = info->cq_id;
+       rsrc->sq_size = info->sq_size;
+       rsrc->rq_size = info->rq_size;
+       rsrc->cq_size = info->rq_size + info->sq_size;
+       rsrc->buf_size = info->buf_size;
+       rsrc->dev = dev;
+
+       ret = i40iw_puda_cq_create(rsrc);
+       if (!ret) {
+               rsrc->completion = PUDA_CQ_CREATED;
+               ret = i40iw_puda_qp_create(rsrc);
+       }
+       if (ret) {
+               i40iw_debug(dev, I40IW_DEBUG_PUDA, "[%s] error qp_create\n", __func__);
+               goto error;
+       }
+       rsrc->completion = PUDA_QP_CREATED;
+
+       ret = i40iw_puda_allocbufs(rsrc, info->tx_buf_cnt + info->rq_size);
+       if (ret) {
+               i40iw_debug(dev, I40IW_DEBUG_PUDA, "[%s] error alloc_buf\n", __func__);
+               goto error;
+       }
+
+       rsrc->rxq_invalid_cnt = info->rq_size;
+       ret = i40iw_puda_replenish_rq(rsrc, true);
+       if (ret)
+               goto error;
+
+       if (info->type == I40IW_PUDA_RSRC_TYPE_IEQ) {
+               if (!i40iw_init_hash_desc(&rsrc->hash_desc)) {
+                       rsrc->check_crc = true;
+                       rsrc->completion = PUDA_HASH_CRC_COMPLETE;
+                       ret = 0;
+               }
+       }
+
+       dev->ccq_ops->ccq_arm(&rsrc->cq);
+       return ret;
+ error:
+       i40iw_puda_dele_resources(dev, info->type, false);
+
+       return ret;
+}
+
+/**
+ * i40iw_ilq_putback_rcvbuf - ilq buffer to put back on rq
+ * @qp: ilq's qp resource
+ * @wqe_idx:  wqe index of completed rcvbuf
+ */
+static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx)
+{
+       u64 *wqe;
+       u64 offset24;
+
+       wqe = qp->qp_uk.rq_base[wqe_idx].elem;
+       get_64bit_val(wqe, 24, &offset24);
+       offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID);
+       set_64bit_val(wqe, 24, offset24);
+}
+
+/**
+ * i40iw_ieq_get_fpdu_length - given length return fpdu length
+ * @length: length of fpdu
+ */
+static u16 i40iw_ieq_get_fpdu_length(u16 length)
+{
+       u16 fpdu_len;
+
+       fpdu_len = length + I40IW_IEQ_MPA_FRAMING;
+       fpdu_len = (fpdu_len + 3) & 0xfffffffc;
+       return fpdu_len;
+}
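+
+/*
+ * A worked example for the helper above, assuming I40IW_IEQ_MPA_FRAMING
+ * covers the MPA length field plus CRC: the + 3 / mask step rounds up to a
+ * 4-byte boundary, so an MPA payload length of 7 with a framing overhead of
+ * 6 becomes 13 and is padded out to a 16-byte fpdu.
+ */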
+
+/**
+ * i40iw_ieq_copy_to_txbuf - copy data from rcv buf to tx buf
+ * @buf: rcv buffer with partial
+ * @txbuf: tx buffer for sending back
+ * @buf_offset: rcv buffer offset to copy from
+ * @txbuf_offset: at offset in tx buf to copy
+ * @length: length of data to copy
+ */
+static void i40iw_ieq_copy_to_txbuf(struct i40iw_puda_buf *buf,
+                                   struct i40iw_puda_buf *txbuf,
+                                   u16 buf_offset, u32 txbuf_offset,
+                                   u32 length)
+{
+       void *mem1 = (u8 *)buf->mem.va + buf_offset;
+       void *mem2 = (u8 *)txbuf->mem.va + txbuf_offset;
+
+       memcpy(mem2, mem1, length);
+}
+
+/**
+ * i40iw_ieq_setup_tx_buf - setup tx buffer for partial handling
+ * @buf: receive buffer with partial
+ * @txbuf: buffer to prepare
+ */
+static void i40iw_ieq_setup_tx_buf(struct i40iw_puda_buf *buf,
+                                  struct i40iw_puda_buf *txbuf)
+{
+       txbuf->maclen = buf->maclen;
+       txbuf->tcphlen = buf->tcphlen;
+       txbuf->ipv4 = buf->ipv4;
+       txbuf->hdrlen = buf->hdrlen;
+       i40iw_ieq_copy_to_txbuf(buf, txbuf, 0, 0, buf->hdrlen);
+}
+
+/**
+ * i40iw_ieq_check_first_buf - check if rcv buffer's seq is in range
+ * @buf: receive exception buffer
+ * @fps: first partial sequence number
+ */
+static void i40iw_ieq_check_first_buf(struct i40iw_puda_buf *buf, u32 fps)
+{
+       u32 offset;
+
+       if (buf->seqnum < fps) {
+               offset = fps - buf->seqnum;
+               if (offset > buf->datalen)
+                       return;
+               buf->data += offset;
+               buf->datalen -= (u16)offset;
+               buf->seqnum = fps;
+       }
+}
+
+/**
+ * i40iw_ieq_compl_pfpdu - write txbuf with full fpdu
+ * @ieq: ieq resource
+ * @rxlist: ieq's received buffer list
+ * @pbufl: temporary list for buffers for fpdu
+ * @txbuf: tx buffer for fpdu
+ * @fpdu_len: total length of fpdu
+ */
+static void  i40iw_ieq_compl_pfpdu(struct i40iw_puda_rsrc *ieq,
+                                  struct list_head *rxlist,
+                                  struct list_head *pbufl,
+                                  struct i40iw_puda_buf *txbuf,
+                                  u16 fpdu_len)
+{
+       struct i40iw_puda_buf *buf;
+       u32 nextseqnum;
+       u16 txoffset, bufoffset;
+
+       buf = i40iw_puda_get_listbuf(pbufl);
+       nextseqnum = buf->seqnum + fpdu_len;
+       txbuf->totallen = buf->hdrlen + fpdu_len;
+       txbuf->data = (u8 *)txbuf->mem.va + buf->hdrlen;
+       i40iw_ieq_setup_tx_buf(buf, txbuf);
+
+       txoffset = buf->hdrlen;
+       bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);
+
+       do {
+               if (buf->datalen >= fpdu_len) {
+                       /* copied full fpdu */
+                       i40iw_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset, fpdu_len);
+                       buf->datalen -= fpdu_len;
+                       buf->data += fpdu_len;
+                       buf->seqnum = nextseqnum;
+                       break;
+               }
+               /* copy partial fpdu */
+               i40iw_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset, buf->datalen);
+               txoffset += buf->datalen;
+               fpdu_len -= buf->datalen;
+               i40iw_puda_ret_bufpool(ieq, buf);
+               buf = i40iw_puda_get_listbuf(pbufl);
+               bufoffset = (u16)(buf->data - (u8 *)buf->mem.va);
+       } while (1);
+
+       /* last buffer on the list*/
+       if (buf->datalen)
+               list_add(&buf->list, rxlist);
+       else
+               i40iw_puda_ret_bufpool(ieq, buf);
+}
+
+/**
+ * i40iw_ieq_create_pbufl - create buffer list for single fpdu
+ * @pfpdu: partial management per user qp
+ * @rxlist: resource list for receive ieq buffers
+ * @pbufl: temp. list for buffers for fpdu
+ * @buf: first receive buffer
+ * @fpdu_len: total length of fpdu
+ */
+static enum i40iw_status_code i40iw_ieq_create_pbufl(
+                                                    struct i40iw_pfpdu *pfpdu,
+                                                    struct list_head *rxlist,
+                                                    struct list_head *pbufl,
+                                                    struct i40iw_puda_buf *buf,
+                                                    u16 fpdu_len)
+{
+       enum i40iw_status_code status = 0;
+       struct i40iw_puda_buf *nextbuf;
+       u32     nextseqnum;
+       u16 plen = fpdu_len - buf->datalen;
+       bool done = false;
+
+       nextseqnum = buf->seqnum + buf->datalen;
+       do {
+               nextbuf = i40iw_puda_get_listbuf(rxlist);
+               if (!nextbuf) {
+                       status = I40IW_ERR_list_empty;
+                       break;
+               }
+               list_add_tail(&nextbuf->list, pbufl);
+               if (nextbuf->seqnum != nextseqnum) {
+                       pfpdu->bad_seq_num++;
+                       status = I40IW_ERR_SEQ_NUM;
+                       break;
+               }
+               if (nextbuf->datalen >= plen) {
+                       done = true;
+               } else {
+                       plen -= nextbuf->datalen;
+                       nextseqnum = nextbuf->seqnum + nextbuf->datalen;
+               }
+
+       } while (!done);
+
+       return status;
+}
+
+/**
+ * i40iw_ieq_handle_partial - process partial fpdu buffer
+ * @ieq: ieq resource
+ * @pfpdu: partial management per user qp
+ * @buf: receive buffer
+ * @fpdu_len: fpdu len in the buffer
+ */
+static enum i40iw_status_code i40iw_ieq_handle_partial(struct i40iw_puda_rsrc *ieq,
+                                                      struct i40iw_pfpdu *pfpdu,
+                                                      struct i40iw_puda_buf *buf,
+                                                      u16 fpdu_len)
+{
+       enum i40iw_status_code status = 0;
+       u8 *crcptr;
+       u32 mpacrc;
+       u32 seqnum = buf->seqnum;
+       struct list_head pbufl; /* partial buffer list */
+       struct i40iw_puda_buf *txbuf = NULL;
+       struct list_head *rxlist = &pfpdu->rxlist;
+
+       INIT_LIST_HEAD(&pbufl);
+       list_add(&buf->list, &pbufl);
+
+       status = i40iw_ieq_create_pbufl(pfpdu, rxlist, &pbufl, buf, fpdu_len);
+       if (status)
+               goto error;
+
+       txbuf = i40iw_puda_get_bufpool(ieq);
+       if (!txbuf) {
+               pfpdu->no_tx_bufs++;
+               status = I40IW_ERR_NO_TXBUFS;
+               goto error;
+       }
+
+       i40iw_ieq_compl_pfpdu(ieq, rxlist, &pbufl, txbuf, fpdu_len);
+       i40iw_ieq_update_tcpip_info(txbuf, fpdu_len, seqnum);
+       crcptr = txbuf->data + fpdu_len - 4;
+       mpacrc = *(u32 *)crcptr;
+       if (ieq->check_crc) {
+               status = i40iw_ieq_check_mpacrc(ieq->hash_desc, txbuf->data,
+                                               (fpdu_len - 4), mpacrc);
+               if (status) {
+                       i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
+                                   "%s: error bad crc\n", __func__);
+                       goto error;
+               }
+       }
+
+       i40iw_debug_buf(ieq->dev, I40IW_DEBUG_IEQ, "IEQ TX BUFFER",
+                       txbuf->mem.va, txbuf->totallen);
+       i40iw_puda_send_buf(ieq, txbuf);
+       pfpdu->rcv_nxt = seqnum + fpdu_len;
+       return status;
+ error:
+       while (!list_empty(&pbufl)) {
+               buf = (struct i40iw_puda_buf *)(pbufl.prev);
+               list_del(&buf->list);
+               list_add(&buf->list, rxlist);
+       }
+       if (txbuf)
+               i40iw_puda_ret_bufpool(ieq, txbuf);
+       return status;
+}
+
+/**
+ * i40iw_ieq_process_buf - process buffer rcvd for ieq
+ * @ieq: ieq resource
+ * @pfpdu: partial management per user qp
+ * @buf: receive buffer
+ */
+static enum i40iw_status_code i40iw_ieq_process_buf(struct i40iw_puda_rsrc *ieq,
+                                                   struct i40iw_pfpdu *pfpdu,
+                                                   struct i40iw_puda_buf *buf)
+{
+       u16 fpdu_len = 0;
+       u16 datalen = buf->datalen;
+       u8 *datap = buf->data;
+       u8 *crcptr;
+       u16 ioffset = 0;
+       u32 mpacrc;
+       u32 seqnum = buf->seqnum;
+       u16 length = 0;
+       u16 full = 0;
+       bool partial = false;
+       struct i40iw_puda_buf *txbuf;
+       struct list_head *rxlist = &pfpdu->rxlist;
+       enum i40iw_status_code ret = 0;
+       enum i40iw_status_code status = 0;
+
+       ioffset = (u16)(buf->data - (u8 *)buf->mem.va);
+       while (datalen) {
+               fpdu_len = i40iw_ieq_get_fpdu_length(ntohs(*(u16 *)datap));
+               if (fpdu_len > pfpdu->max_fpdu_data) {
+                       i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
+                                   "%s: error bad fpdu_len\n", __func__);
+                       status = I40IW_ERR_MPA_CRC;
+                       list_add(&buf->list, rxlist);
+                       return status;
+               }
+
+               if (datalen < fpdu_len) {
+                       partial = true;
+                       break;
+               }
+               crcptr = datap + fpdu_len - 4;
+               mpacrc = *(u32 *)crcptr;
+               if (ieq->check_crc)
+                       ret = i40iw_ieq_check_mpacrc(ieq->hash_desc,
+                                                    datap, fpdu_len - 4, mpacrc);
+               if (ret) {
+                       status = I40IW_ERR_MPA_CRC;
+                       list_add(&buf->list, rxlist);
+                       return status;
+               }
+               full++;
+               pfpdu->fpdu_processed++;
+               datap += fpdu_len;
+               length += fpdu_len;
+               datalen -= fpdu_len;
+       }
+       if (full) {
+               /* copy full pdu's in the txbuf and send them out */
+               txbuf = i40iw_puda_get_bufpool(ieq);
+               if (!txbuf) {
+                       pfpdu->no_tx_bufs++;
+                       status = I40IW_ERR_NO_TXBUFS;
+                       list_add(&buf->list, rxlist);
+                       return status;
+               }
+               /* modify txbuf's buffer header */
+               i40iw_ieq_setup_tx_buf(buf, txbuf);
+               /* copy full fpdu's to new buffer */
+               i40iw_ieq_copy_to_txbuf(buf, txbuf, ioffset, buf->hdrlen,
+                                       length);
+               txbuf->totallen = buf->hdrlen + length;
+
+               i40iw_ieq_update_tcpip_info(txbuf, length, buf->seqnum);
+               i40iw_puda_send_buf(ieq, txbuf);
+
+               if (!datalen) {
+                       pfpdu->rcv_nxt = buf->seqnum + length;
+                       i40iw_puda_ret_bufpool(ieq, buf);
+                       return status;
+               }
+               buf->data = datap;
+               buf->seqnum = seqnum + length;
+               buf->datalen = datalen;
+               pfpdu->rcv_nxt = buf->seqnum;
+       }
+       if (partial)
+               status = i40iw_ieq_handle_partial(ieq, pfpdu, buf, fpdu_len);
+
+       return status;
+}
+
+/**
+ * i40iw_ieq_process_fpdus - process fpdu buffers on its list
+ * @qp: qp with pending partial fpdus
+ * @ieq: ieq resource
+ */
+static void i40iw_ieq_process_fpdus(struct i40iw_sc_qp *qp,
+                                   struct i40iw_puda_rsrc *ieq)
+{
+       struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
+       struct list_head *rxlist = &pfpdu->rxlist;
+       struct i40iw_puda_buf *buf;
+       enum i40iw_status_code status;
+
+       do {
+               if (list_empty(rxlist))
+                       break;
+               buf = i40iw_puda_get_listbuf(rxlist);
+               if (!buf) {
+                       i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
+                                   "%s: error no buf\n", __func__);
+                       break;
+               }
+               if (buf->seqnum != pfpdu->rcv_nxt) {
+                       /* This could be out of order or missing packet */
+                       pfpdu->out_of_order++;
+                       list_add(&buf->list, rxlist);
+                       break;
+               }
+               /* keep processing buffers from the head of the list */
+               status = i40iw_ieq_process_buf(ieq, pfpdu, buf);
+               if (status == I40IW_ERR_MPA_CRC) {
+                       pfpdu->mpa_crc_err = true;
+                       while (!list_empty(rxlist)) {
+                               buf = i40iw_puda_get_listbuf(rxlist);
+                               i40iw_puda_ret_bufpool(ieq, buf);
+                               pfpdu->crc_err++;
+                       }
+                       /* create CQP for AE */
+                       i40iw_ieq_mpa_crc_ae(ieq->dev, qp);
+               }
+       } while (!status);
+}
+
+/**
+ * i40iw_ieq_handle_exception - handle qp's exception
+ * @ieq: ieq resource
+ * @qp: qp receiving exception
+ * @buf: receive buffer
+ */
+static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq,
+                                      struct i40iw_sc_qp *qp,
+                                      struct i40iw_puda_buf *buf)
+{
+       struct i40iw_puda_buf *tmpbuf = NULL;
+       struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
+       u32 *hw_host_ctx = (u32 *)qp->hw_host_ctx;
+       u32 rcv_wnd = hw_host_ctx[23];
+       /* first partial seq # in q2 */
+       u32 fps = qp->q2_buf[16];
+       struct list_head *rxlist = &pfpdu->rxlist;
+       struct list_head *plist;
+
+       pfpdu->total_ieq_bufs++;
+
+       if (pfpdu->mpa_crc_err) {
+               pfpdu->crc_err++;
+               goto error;
+       }
+       if (pfpdu->mode && (fps != pfpdu->fps)) {
+               /* clean up qp as it is new partial sequence */
+               i40iw_ieq_cleanup_qp(ieq->dev, qp);
+               i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ,
+                           "%s: restarting new partial\n", __func__);
+               pfpdu->mode = false;
+       }
+
+       if (!pfpdu->mode) {
+               i40iw_debug_buf(ieq->dev, I40IW_DEBUG_IEQ, "Q2 BUFFER", (u64 *)qp->q2_buf, 128);
+               /* First_Partial_Sequence_Number check */
+               pfpdu->rcv_nxt = fps;
+               pfpdu->fps = fps;
+               pfpdu->mode = true;
+               pfpdu->max_fpdu_data = ieq->mss;
+               pfpdu->pmode_count++;
+               INIT_LIST_HEAD(rxlist);
+               i40iw_ieq_check_first_buf(buf, fps);
+       }
+
+       if ((buf->seqnum - pfpdu->rcv_nxt) > rcv_wnd) {
+               pfpdu->bad_seq_num++;
+               goto error;
+       }
+
+       if (!list_empty(rxlist)) {
+               /* walk forward until a buffer with a later seqnum is found */
+               tmpbuf = (struct i40iw_puda_buf *)rxlist->next;
+               while ((struct list_head *)tmpbuf != rxlist) {
+                       if ((int)(buf->seqnum - tmpbuf->seqnum) < 0)
+                               break;
+                       plist = &tmpbuf->list;
+                       tmpbuf = (struct i40iw_puda_buf *)plist->next;
+               }
+               /* Insert buf before tmpbuf */
+               list_add_tail(&buf->list, &tmpbuf->list);
+       } else {
+               list_add_tail(&buf->list, rxlist);
+       }
+       i40iw_ieq_process_fpdus(qp, ieq);
+       return;
+ error:
+       i40iw_puda_ret_bufpool(ieq, buf);
+}
+
+/**
+ * i40iw_ieq_receive - received exception buffer
+ * @dev: iwarp device
+ * @buf: exception buffer received
+ */
+static void i40iw_ieq_receive(struct i40iw_sc_dev *dev,
+                             struct i40iw_puda_buf *buf)
+{
+       struct i40iw_puda_rsrc *ieq = dev->ieq;
+       struct i40iw_sc_qp *qp = NULL;
+       u32 wqe_idx = ieq->compl_rxwqe_idx;
+
+       qp = i40iw_ieq_get_qp(dev, buf);
+       if (!qp) {
+               ieq->stats_bad_qp_id++;
+               i40iw_puda_ret_bufpool(ieq, buf);
+       } else {
+               i40iw_ieq_handle_exception(ieq, qp, buf);
+       }
+       /*
+        * ieq->rx_wqe_idx is used by i40iw_puda_replenish_rq()
+        * on which wqe_idx to start replenish rq
+        */
+       if (!ieq->rxq_invalid_cnt)
+               ieq->rx_wqe_idx = wqe_idx;
+       ieq->rxq_invalid_cnt++;
+}
+
+/**
+ * i40iw_ieq_tx_compl - put back after sending completed exception buffer
+ * @dev: iwarp device
+ * @sqwrid: pointer to puda buffer
+ */
+static void i40iw_ieq_tx_compl(struct i40iw_sc_dev *dev, void *sqwrid)
+{
+       struct i40iw_puda_rsrc *ieq = dev->ieq;
+       struct i40iw_puda_buf *buf = (struct i40iw_puda_buf *)sqwrid;
+
+       i40iw_puda_ret_bufpool(ieq, buf);
+       if (!list_empty(&ieq->txpend)) {
+               buf = i40iw_puda_get_listbuf(&ieq->txpend);
+               i40iw_puda_send_buf(ieq, buf);
+       }
+}
+
+/**
+ * i40iw_ieq_cleanup_qp - qp is being destroyed
+ * @dev: iwarp device
+ * @qp: qp being destroyed, return its pending fpdu buffers
+ */
+void i40iw_ieq_cleanup_qp(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
+{
+       struct i40iw_puda_buf *buf;
+       struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
+       struct list_head *rxlist = &pfpdu->rxlist;
+       struct i40iw_puda_rsrc *ieq = dev->ieq;
+
+       if (!pfpdu->mode)
+               return;
+       while (!list_empty(rxlist)) {
+               buf = i40iw_puda_get_listbuf(rxlist);
+               i40iw_puda_ret_bufpool(ieq, buf);
+       }
+}
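
The ordered insert in i40iw_ieq_handle_exception() keys the rxlist on TCP sequence numbers and relies on the usual wrap-safe comparison of the unsigned difference. A minimal standalone sketch of that comparison (the seq_before() helper name is illustrative, not part of the driver):

        #include <stdbool.h>
        #include <stdint.h>

        /* true if s1 precedes s2, correct across 32-bit sequence wraparound */
        static bool seq_before(uint32_t s1, uint32_t s2)
        {
                return (int32_t)(s1 - s2) < 0;
        }

        /* e.g. seq_before(0xfffffff0, 0x10) is true, even though 0xfffffff0 > 0x10 */
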
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.h b/drivers/infiniband/hw/i40iw/i40iw_puda.h
new file mode 100644 (file)
index 0000000..52bf782
--- /dev/null
@@ -0,0 +1,183 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses.  You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+*   Redistribution and use in source and binary forms, with or
+*   without modification, are permitted provided that the following
+*   conditions are met:
+*
+*    - Redistributions of source code must retain the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer.
+*
+*    - Redistributions in binary form must reproduce the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer in the documentation and/or other materials
+*      provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_PUDA_H
+#define I40IW_PUDA_H
+
+#define I40IW_IEQ_MPA_FRAMING 6
+
+struct i40iw_sc_dev;
+struct i40iw_sc_qp;
+struct i40iw_sc_cq;
+
+enum puda_resource_type {
+       I40IW_PUDA_RSRC_TYPE_ILQ = 1,
+       I40IW_PUDA_RSRC_TYPE_IEQ
+};
+
+enum puda_rsrc_complete {
+       PUDA_CQ_CREATED = 1,
+       PUDA_QP_CREATED,
+       PUDA_TX_COMPLETE,
+       PUDA_RX_COMPLETE,
+       PUDA_HASH_CRC_COMPLETE
+};
+
+struct i40iw_puda_completion_info {
+       struct i40iw_qp_uk *qp;
+       u8 q_type;
+       u8 vlan_valid;
+       u8 l3proto;
+       u8 l4proto;
+       u16 payload_len;
+       u32 compl_error;        /* No_err=0, else major and minor err code */
+       u32 qp_id;
+       u32 wqe_idx;
+};
+
+struct i40iw_puda_send_info {
+       u64 paddr;              /* Physical address */
+       u32 len;
+       u8 tcplen;
+       u8 maclen;
+       bool ipv4;
+       bool doloopback;
+       void *scratch;
+};
+
+struct i40iw_puda_buf {
+       struct list_head list;  /* MUST be first entry */
+       struct i40iw_dma_mem mem;       /* DMA memory for the buffer */
+       struct i40iw_puda_buf *next;    /* for alloclist in rsrc struct */
+       struct i40iw_virt_mem buf_mem;  /* Buffer memory for this buffer */
+       void *scratch;
+       u8 *iph;
+       u8 *tcph;
+       u8 *data;
+       u16 datalen;
+       u16 vlan_id;
+       u8 tcphlen;             /* tcp length in bytes */
+       u8 maclen;              /* mac length in bytes */
+       u32 totallen;           /* maclen + iphlen + tcphlen + datalen */
+       atomic_t refcount;
+       u8 hdrlen;
+       bool ipv4;
+       u32 seqnum;
+};
+
+struct i40iw_puda_rsrc_info {
+       enum puda_resource_type type;   /* ILQ or IEQ */
+       u32 count;
+       u16 pd_id;
+       u32 cq_id;
+       u32 qp_id;
+       u32 sq_size;
+       u32 rq_size;
+       u16 buf_size;
+       u16 mss;
+       u32 tx_buf_cnt;         /* total bufs allocated will be rq_size + tx_buf_cnt */
+       void (*receive)(struct i40iw_sc_dev *, struct i40iw_puda_buf *);
+       void (*xmit_complete)(struct i40iw_sc_dev *, void *);
+};
+
+struct i40iw_puda_rsrc {
+       struct i40iw_sc_cq cq;
+       struct i40iw_sc_qp qp;
+       struct i40iw_sc_pd sc_pd;
+       struct i40iw_sc_dev *dev;
+       struct i40iw_dma_mem cqmem;
+       struct i40iw_dma_mem qpmem;
+       struct i40iw_virt_mem ilq_mem;
+       enum puda_rsrc_complete completion;
+       enum puda_resource_type type;
+       u16 buf_size;           /* buffer must be max datalen + tcpip hdr + mac */
+       u16 mss;
+       u32 cq_id;
+       u32 qp_id;
+       u32 sq_size;
+       u32 rq_size;
+       u32 cq_size;
+       struct i40iw_sq_uk_wr_trk_info *sq_wrtrk_array;
+       u64 *rq_wrid_array;
+       u32 compl_rxwqe_idx;
+       u32 rx_wqe_idx;
+       u32 rxq_invalid_cnt;
+       u32 tx_wqe_avail_cnt;
+       bool check_crc;
+       struct shash_desc *hash_desc;
+       struct list_head txpend;
+       struct list_head bufpool;       /* free buffers pool list for recv and xmit */
+       u32 alloc_buf_count;
+       u32 avail_buf_count;            /* snapshot of currently available buffers */
+       spinlock_t bufpool_lock;
+       struct i40iw_puda_buf *alloclist;
+       void (*receive)(struct i40iw_sc_dev *, struct i40iw_puda_buf *);
+       void (*xmit_complete)(struct i40iw_sc_dev *, void *);
+       /* puda stats */
+       u64 stats_buf_alloc_fail;
+       u64 stats_pkt_rcvd;
+       u64 stats_pkt_sent;
+       u64 stats_rcvd_pkt_err;
+       u64 stats_sent_pkt_q;
+       u64 stats_bad_qp_id;
+};
+
+struct i40iw_puda_buf *i40iw_puda_get_bufpool(struct i40iw_puda_rsrc *rsrc);
+void i40iw_puda_ret_bufpool(struct i40iw_puda_rsrc *rsrc,
+                           struct i40iw_puda_buf *buf);
+void i40iw_puda_send_buf(struct i40iw_puda_rsrc *rsrc,
+                        struct i40iw_puda_buf *buf);
+enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp,
+                                      struct i40iw_puda_send_info *info);
+enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_dev *dev,
+                                             struct i40iw_puda_rsrc_info *info);
+void i40iw_puda_dele_resources(struct i40iw_sc_dev *dev,
+                              enum puda_resource_type type,
+                              bool reset);
+enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev,
+                                                 struct i40iw_sc_cq *cq, u32 *compl_err);
+void i40iw_ieq_cleanup_qp(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
+
+struct i40iw_sc_qp *i40iw_ieq_get_qp(struct i40iw_sc_dev *dev,
+                                    struct i40iw_puda_buf *buf);
+enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_info *info,
+                                                struct i40iw_puda_buf *buf);
+enum i40iw_status_code i40iw_ieq_check_mpacrc(struct shash_desc *desc,
+                                             void *addr, u32 length, u32 value);
+enum i40iw_status_code i40iw_init_hash_desc(struct shash_desc **desc);
+void i40iw_ieq_mpa_crc_ae(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
+void i40iw_free_hash_desc(struct shash_desc *desc);
+void i40iw_ieq_update_tcpip_info(struct i40iw_puda_buf *buf, u16 length,
+                                u32 seqnum);
+#endif
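
Taken together, struct i40iw_puda_rsrc_info and i40iw_puda_create_rsrc() are the setup interface for a PUDA resource (ILQ or IEQ). A minimal sketch of how an IEQ could be brought up through it; the ids and sizes below are illustrative only, and the two callbacks stand in for the static handlers that i40iw_puda.c installs:

        static void example_ieq_receive(struct i40iw_sc_dev *dev,
                                        struct i40iw_puda_buf *buf)
        {
                /* hand the exception buffer to reassembly (see i40iw_ieq_handle_exception) */
        }

        static void example_ieq_tx_compl(struct i40iw_sc_dev *dev, void *sqwrid)
        {
                /* the completed tx buffer comes back here via sqwrid */
        }

        static enum i40iw_status_code example_setup_ieq(struct i40iw_sc_dev *dev)
        {
                struct i40iw_puda_rsrc_info info = {};

                info.type = I40IW_PUDA_RSRC_TYPE_IEQ;
                info.cq_id = 2;                 /* illustrative ids and sizes */
                info.qp_id = 2;
                info.sq_size = 512;
                info.rq_size = 512;
                info.buf_size = 2048;
                info.mss = 1460;
                info.tx_buf_cnt = 16;
                info.receive = example_ieq_receive;
                info.xmit_complete = example_ieq_tx_compl;

                return i40iw_puda_create_rsrc(dev, &info);
        }

On teardown, i40iw_puda_dele_resources(dev, I40IW_PUDA_RSRC_TYPE_IEQ, reset) releases what the create call allocated.
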
diff --git a/drivers/infiniband/hw/i40iw/i40iw_register.h b/drivers/infiniband/hw/i40iw/i40iw_register.h
new file mode 100644 (file)
index 0000000..5776818
--- /dev/null
@@ -0,0 +1,1030 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses.  You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+*   Redistribution and use in source and binary forms, with or
+*   without modification, are permitted provided that the following
+*   conditions are met:
+*
+*    - Redistributions of source code must retain the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer.
+*
+*    - Redistributions in binary form must reproduce the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer in the documentation and/or other materials
+*      provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_REGISTER_H
+#define I40IW_REGISTER_H
+
+#define I40E_GLGEN_STAT               0x000B612C /* Reset: POR */
+
+#define I40E_PFHMC_PDINV               0x000C0300 /* Reset: PFR */
+#define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0
+#define I40E_PFHMC_PDINV_PMSDIDX_MASK  (0xFFF <<  I40E_PFHMC_PDINV_PMSDIDX_SHIFT)
+#define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16
+#define I40E_PFHMC_PDINV_PMPDIDX_MASK  (0x1FF <<  I40E_PFHMC_PDINV_PMPDIDX_SHIFT)
+#define I40E_PFHMC_SDCMD_PMSDWR_SHIFT  31
+#define I40E_PFHMC_SDCMD_PMSDWR_MASK   (0x1 <<  I40E_PFHMC_SDCMD_PMSDWR_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT   0
+#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK    (0x1 <<  I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT    1
+#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK     (0x1 <<  I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2
+#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK  (0x3FF <<  I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT)
+
+#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */     /* Reset: PFR */
+#define I40E_PFINT_DYN_CTLN_INTENA_SHIFT          0
+#define I40E_PFINT_DYN_CTLN_INTENA_MASK           (0x1 <<  I40E_PFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT        1
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK         (0x1 <<  I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT        3
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK         (0x3 <<  I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
+
+#define I40E_VFINT_DYN_CTLN1(_INTVF)               (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */
+#define I40E_GLHMC_VFPDINV(_i)               (0x000C8300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+
+#define I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT 15
+#define I40E_PFHMC_PDINV_PMSDPARTSEL_MASK  (0x1 <<  I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT)
+#define I40E_GLPCI_LBARCTRL                    0x000BE484 /* Reset: POR */
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT    4
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK     (0x3 <<  I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_DREVID                      0x0009C480 /* Reset: PCIR */
+#define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0
+#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK 0xFF
+
+#define I40E_PFPE_AEQALLOC               0x00131180 /* Reset: PFR */
+#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_PFPE_AEQALLOC_AECOUNT_MASK  (0xFFFFFFFF <<  I40E_PFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_PFPE_CCQPHIGH                  0x00008200 /* Reset: PFR */
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK  (0xFFFFFFFF <<  I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_PFPE_CCQPLOW                 0x00008180 /* Reset: PFR */
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK  (0xFFFFFFFF <<  I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_PFPE_CCQPSTATUS                   0x00008100 /* Reset: PFR */
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT   0
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK    (0x1 <<  I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
+#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_MASK  (0x7 <<  I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
+#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_MASK  (0x3F <<  I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT    31
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK     (0x1 <<  I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_PFPE_CQACK              0x00131100 /* Reset: PFR */
+#define I40E_PFPE_CQACK_PECQID_SHIFT 0
+#define I40E_PFPE_CQACK_PECQID_MASK  (0x1FFFF <<  I40E_PFPE_CQACK_PECQID_SHIFT)
+#define I40E_PFPE_CQARM              0x00131080 /* Reset: PFR */
+#define I40E_PFPE_CQARM_PECQID_SHIFT 0
+#define I40E_PFPE_CQARM_PECQID_MASK  (0x1FFFF <<  I40E_PFPE_CQARM_PECQID_SHIFT)
+#define I40E_PFPE_CQPDB              0x00008000 /* Reset: PFR */
+#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_PFPE_CQPDB_WQHEAD_MASK  (0x7FF <<  I40E_PFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_PFPE_CQPERRCODES                      0x00008880 /* Reset: PFR */
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK  (0xFFFF <<  I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK  (0xFFFF <<  I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_PFPE_CQPTAIL                  0x00008080 /* Reset: PFR */
+#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT     0
+#define I40E_PFPE_CQPTAIL_WQTAIL_MASK      (0x7FF <<  I40E_PFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK  (0x1 <<  I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_PFPE_FLMQ1ALLOCERR                   0x00008980 /* Reset: PFR */
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK  (0xFFFF <<  I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_FLMXMITALLOCERR                   0x00008900 /* Reset: PFR */
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK  (0xFFFF <<  I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_IPCONFIG0                        0x00008280 /* Reset: PFR */
+#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT           0
+#define I40E_PFPE_IPCONFIG0_PEIPID_MASK            (0xFFFF <<  I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK  (0x1 <<  I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_PFPE_MRTEIDXMASK                       0x00008600 /* Reset: PFR */
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK  (0x1F <<  I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_PFPE_RCVUNEXPECTEDERROR                        0x00008680 /* Reset: PFR */
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK  (0xFFFFFF <<  I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_PFPE_TCPNOWTIMER               0x00008580 /* Reset: PFR */
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK  (0xFFFFFFFF <<  I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+
+#define I40E_PFPE_WQEALLOC                      0x00138C00 /* Reset: PFR */
+#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT         0
+#define I40E_PFPE_WQEALLOC_PEQPID_MASK          (0x3FFFF <<  I40E_PFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK  (0xFFF <<  I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
+
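
The PFPE block above follows the usual i40e register-map convention: each field has a _SHIFT and a _MASK that is already shifted into position, so a field is read by masking the register value and shifting back down. A brief sketch for I40E_PFPE_CCQPSTATUS, assuming a hypothetical 32-bit MMIO read helper rd32(hw, reg):

        u32 val = rd32(hw, I40E_PFPE_CCQPSTATUS);       /* rd32() and hw are placeholders */
        bool ccqp_done = !!(val & I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK);
        u8 hmc_profile = (val & I40E_PFPE_CCQPSTATUS_HMC_PROFILE_MASK) >>
                         I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT;
        bool ccqp_err = !!(val & I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK);
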
+#define I40E_VFPE_AEQALLOC(_VF)          (0x00130C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC_MAX_INDEX     127
+#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC_AECOUNT_MASK  (0xFFFFFFFF <<  I40E_VFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH(_VF)             (0x00001000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH_MAX_INDEX        127
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK  (0xFFFFFFFF <<  I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW(_VF)            (0x00000C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW_MAX_INDEX       127
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK  (0xFFFFFFFF <<  I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS(_VF)              (0x00000800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS_MAX_INDEX         127
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT   0
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK    (0x1 <<  I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
+#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_MASK  (0x7 <<  I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
+#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_MASK  (0x3F <<  I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT    31
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK     (0x1 <<  I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK(_VF)         (0x00130800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQACK_MAX_INDEX    127
+#define I40E_VFPE_CQACK_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK_PECQID_MASK  (0x1FFFF <<  I40E_VFPE_CQACK_PECQID_SHIFT)
+#define I40E_VFPE_CQARM(_VF)         (0x00130400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQARM_MAX_INDEX    127
+#define I40E_VFPE_CQARM_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM_PECQID_MASK  (0x1FFFF <<  I40E_VFPE_CQARM_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB(_VF)         (0x00000000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPDB_MAX_INDEX    127
+#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB_WQHEAD_MASK  (0x7FF <<  I40E_VFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES(_VF)                 (0x00001800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES_MAX_INDEX            127
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK  (0xFFFF <<  I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK  (0xFFFF <<  I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL(_VF)             (0x00000400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPTAIL_MAX_INDEX        127
+#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT     0
+#define I40E_VFPE_CQPTAIL_WQTAIL_MASK      (0x7FF <<  I40E_VFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK  (0x1 <<  I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG0(_VF)                   (0x00001400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_IPCONFIG0_MAX_INDEX              127
+#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT           0
+#define I40E_VFPE_IPCONFIG0_PEIPID_MASK            (0xFFFF <<  I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK  (0x1 <<  I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK(_VF)                  (0x00003000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX             127
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK  (0x1F <<  I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF)                   (0x00003400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX              127
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK  (0xFFFFFF <<  I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER(_VF)          (0x00002C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX     127
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK  (0xFFFFFFFF <<  I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC(_VF)                 (0x00138000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC_MAX_INDEX            127
+#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT         0
+#define I40E_VFPE_WQEALLOC_PEQPID_MASK          (0x3FFFF <<  I40E_VFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK  (0xFFF <<  I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
+
+#define I40E_GLPE_CPUSTATUS0                    0x0000D040 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK  (0xFFFFFFFF <<  I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT)
+#define I40E_GLPE_CPUSTATUS1                    0x0000D044 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK  (0xFFFFFFFF <<  I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT)
+#define I40E_GLPE_CPUSTATUS2                    0x0000D048 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK  (0xFFFFFFFF <<  I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT)
+#define I40E_GLPE_CPUTRIG0                   0x0000D060 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT  0
+#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_MASK   (0xFFFF <<  I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT)
+#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT 17
+#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_MASK  (0x1 <<  I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT)
+#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT 18
+#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_MASK  (0x1 <<  I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT)
+#define I40E_GLPE_DUAL40_RUPM                     0x0000DA04 /* Reset: PE_CORER */
+#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT 0
+#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_MASK  (0x1 <<  I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT)
+#define I40E_GLPE_PFAEQEDROPCNT(_i)               (0x00131440 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFAEQEDROPCNT_MAX_INDEX         15
+#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_MASK  (0xFFFF <<  I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
+#define I40E_GLPE_PFCEQEDROPCNT(_i)               (0x001313C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFCEQEDROPCNT_MAX_INDEX         15
+#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_MASK  (0xFFFF <<  I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
+#define I40E_GLPE_PFCQEDROPCNT(_i)              (0x00131340 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFCQEDROPCNT_MAX_INDEX        15
+#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_MASK  (0xFFFF <<  I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT)
+#define I40E_GLPE_RUPM_CQPPOOL                0x0000DACC /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_MASK  (0xFF <<  I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT)
+#define I40E_GLPE_RUPM_FLRPOOL                0x0000DAC4 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_MASK  (0xFF <<  I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL                   0x0000DA00 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT    0
+#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_MASK     (0xFF <<  I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT 26
+#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_MASK  (0x1 <<  I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT 27
+#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_MASK  (0x1 <<  I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT 28
+#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_MASK  (0x1 <<  I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT 29
+#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_MASK  (0x1 <<  I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT    30
+#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_MASK     (0x1 <<  I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT   31
+#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_MASK    (0x1 <<  I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT)
+#define I40E_GLPE_RUPM_PTXPOOL                0x0000DAC8 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_MASK  (0xFF <<  I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT)
+#define I40E_GLPE_RUPM_PUSHPOOL                 0x0000DAC0 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_MASK  (0xFF <<  I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT)
+#define I40E_GLPE_RUPM_TXHOST_EN                 0x0000DA08 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT 0
+#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_MASK  (0x1 <<  I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT)
+#define I40E_GLPE_VFAEQEDROPCNT(_i)               (0x00132540 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFAEQEDROPCNT_MAX_INDEX         31
+#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_MASK  (0xFFFF <<  I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFCEQEDROPCNT(_i)               (0x00132440 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFCEQEDROPCNT_MAX_INDEX         31
+#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_MASK  (0xFFFF <<  I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFCQEDROPCNT(_i)              (0x00132340 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFCQEDROPCNT_MAX_INDEX        31
+#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_MASK  (0xFFFF <<  I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL(_i)                  (0x0000D400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX            31
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK  (0x7 <<  I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT   8
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK    (0x7 <<  I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMQ1ALLOCERR(_i)               (0x0000C700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX         31
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK  (0xFFFF <<  I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFFLMXMITALLOCERR(_i)               (0x0000C600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX         31
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK  (0xFFFF <<  I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFUDACTRL(_i)                    (0x0000C000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFUDACTRL_MAX_INDEX              31
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT  0
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK   (0x1 <<  I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT  1
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK   (0x1 <<  I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT  2
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK   (0x1 <<  I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT  3
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK   (0x1 <<  I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK  (0x1 <<  I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN(_i)         (0x0000C100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX   31
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT   0
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK    (0x3FFFF <<  I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK  (0x1 <<  I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT)
+
+#define I40E_GLPES_PFIP4RXDISCARD(_i)                (0x00010600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX          15
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK  (0xFFFFFFFF <<  I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSHI(_i)                (0x00010804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX          15
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK  (0xFFFF <<  I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSLO(_i)                (0x00010800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX          15
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSHI(_i)                 (0x00010A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX           15
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK  (0xFFFF <<  I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSLO(_i)                 (0x00010A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX           15
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSHI(_i)                 (0x00010C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX           15
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK  (0xFFFF <<  I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSLO(_i)                 (0x00010C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX           15
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSHI(_i)               (0x00010204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX         15
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK  (0xFFFF <<  I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSLO(_i)               (0x00010200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX         15
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSHI(_i)               (0x00010404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX         15
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK  (0xFFFF <<  I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSLO(_i)               (0x00010400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX         15
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXTRUNC(_i)              (0x00010700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX        15
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK  (0xFFFFFFFF <<  I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSHI(_i)                (0x00011E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX          15
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK  (0xFFFF <<  I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSLO(_i)                (0x00011E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX          15
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSHI(_i)                 (0x00012004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX           15
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK  (0xFFFF <<  I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSLO(_i)                 (0x00012000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX           15
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSHI(_i)                 (0x00012204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX           15
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK  (0xFFFF <<  I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSLO(_i)                 (0x00012200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX           15
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXNOROUTE(_i)                (0x00012E00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX          15
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK  (0xFFFFFF <<  I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSHI(_i)               (0x00011A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX         15
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK  (0xFFFF <<  I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSLO(_i)               (0x00011A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX         15
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSHI(_i)               (0x00011C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX         15
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK  (0xFFFF <<  I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSLO(_i)               (0x00011C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX         15
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXDISCARD(_i)                (0x00011200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX          15
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK  (0xFFFFFFFF <<  I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSHI(_i)                (0x00011404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX          15
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK  (0xFFFF <<  I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSLO(_i)                (0x00011400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX          15
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSHI(_i)                 (0x00011604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX           15
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK  (0xFFFF <<  I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSLO(_i)                 (0x00011600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX           15
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSHI(_i)                 (0x00011804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX           15
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK  (0xFFFF <<  I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSLO(_i)                 (0x00011800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX           15
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSHI(_i)               (0x00010E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX         15
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK  (0xFFFF <<  I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSLO(_i)               (0x00010E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX         15
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSHI(_i)               (0x00011004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX         15
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK  (0xFFFF <<  I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSLO(_i)               (0x00011000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX         15
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXTRUNC(_i)              (0x00011300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX        15
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK  (0xFFFFFFFF <<  I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSHI(_i)                (0x00012804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX          15
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK  (0xFFFF <<  I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSLO(_i)                (0x00012800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX          15
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSHI(_i)                 (0x00012A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX           15
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK  (0xFFFF <<  I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSLO(_i)                 (0x00012A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX           15
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSHI(_i)                 (0x00012C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX           15
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK  (0xFFFF <<  I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSLO(_i)                 (0x00012C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX           15
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXNOROUTE(_i)                (0x00012F00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX          15
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK  (0xFFFFFF <<  I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSHI(_i)               (0x00012404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX         15
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK  (0xFFFF <<  I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSLO(_i)               (0x00012400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX         15
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSHI(_i)               (0x00012604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX         15
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK  (0xFFFF <<  I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSLO(_i)               (0x00012600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX         15
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSHI(_i)               (0x00013E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX         15
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK  (0xFFFF <<  I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSLO(_i)               (0x00013E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX         15
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSHI(_i)                (0x00014004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX          15
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK  (0xFFFF <<  I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSLO(_i)                (0x00014000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX          15
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSHI(_i)               (0x00013C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX         15
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK  (0xFFFF <<  I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSLO(_i)               (0x00013C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX         15
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSHI(_i)               (0x00014404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX         15
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK  (0xFFFF <<  I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSLO(_i)               (0x00014400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX         15
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSHI(_i)                (0x00014604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX          15
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK  (0xFFFF <<  I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSLO(_i)                (0x00014600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX          15
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSHI(_i)               (0x00014204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX         15
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK  (0xFFFF <<  I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSLO(_i)               (0x00014200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX         15
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDHI(_i)              (0x00014804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX        15
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK  (0xFFFFFFFF <<  I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDLO(_i)              (0x00014800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX        15
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_PFRDMAVINVHI(_i)              (0x00014A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX        15
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK  (0xFFFFFFFF <<  I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_PFRDMAVINVLO(_i)              (0x00014A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX        15
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_PFRXVLANERR(_i)             (0x00010000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRXVLANERR_MAX_INDEX       15
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK  (0xFFFFFF <<  I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_PFTCPRTXSEG(_i)             (0x00013600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX       15
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK  (0xFFFFFFFF <<  I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_PFTCPRXOPTERR(_i)               (0x00013200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX         15
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK  (0xFFFFFF <<  I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_PFTCPRXPROTOERR(_i)                 (0x00013300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX           15
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK  (0xFFFFFF <<  I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSHI(_i)               (0x00013004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX         15
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK  (0xFFFF <<  I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSLO(_i)               (0x00013000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX         15
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGHI(_i)              (0x00013404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX        15
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK  (0xFFFF <<  I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGLO(_i)              (0x00013400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX        15
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSHI(_i)               (0x00013804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX         15
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK  (0xFFFF <<  I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSLO(_i)               (0x00013800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX         15
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSHI(_i)               (0x00013A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX         15
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK  (0xFFFF <<  I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSLO(_i)               (0x00013A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX         15
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSHI                         0x0001E014 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK  (0xFFFFFF <<  I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSLO                         0x0001E010 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPHI                      0x0001E01C /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK  (0xFFFFFF <<  I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPLO                      0x0001E018 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT)
+#define I40E_GLPES_RDMARXOOONOMARK                     0x0001E004 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK  (0xFFFFFFFF <<  I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT)
+#define I40E_GLPES_RDMARXUNALIGN                     0x0001E000 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK  (0xFFFFFFFF <<  I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLEHI                       0x0001E044 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK  (0xFFFFFF <<  I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLELO                       0x0001E040 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK  (0xFFFFFFFF <<  I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLEHI                      0x0001E02C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK  (0xFFFFFF <<  I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLELO                      0x0001E028 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK  (0xFFFFFFFF <<  I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKHI                       0x0001E024 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK  (0xFFFFFF <<  I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKSLO                      0x0001E020 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLEHI                        0x0001E03C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK  (0xFFFFFF <<  I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLELO                        0x0001E038 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK  (0xFFFFFFFF <<  I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLEHI                      0x0001E034 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK  (0xFFFFFF <<  I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLELO                      0x0001E030 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK  (0xFFFFFFFF <<  I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTHI                          0x0001E04C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK  (0xFFFFFF <<  I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTLO                          0x0001E048 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTHI                        0x0001E054 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK  (0xFFFFFF <<  I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTLO                        0x0001E050 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSHI                    0x0001E05C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK  (0xFFFFFF <<  I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSLO                    0x0001E058 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXDISCARD(_i)                (0x00018600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX          31
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK  (0xFFFFFFFF <<  I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSHI(_i)                (0x00018804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX          31
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK  (0xFFFF <<  I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSLO(_i)                (0x00018800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX          31
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSHI(_i)                 (0x00018A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX           31
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK  (0xFFFF <<  I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSLO(_i)                 (0x00018A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX           31
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSHI(_i)                 (0x00018C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX           31
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK  (0xFFFF <<  I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSLO(_i)                 (0x00018C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX           31
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSHI(_i)               (0x00018204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX         31
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK  (0xFFFF <<  I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSLO(_i)               (0x00018200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX         31
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSHI(_i)               (0x00018404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX         31
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK  (0xFFFF <<  I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSLO(_i)               (0x00018400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX         31
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXTRUNC(_i)              (0x00018700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX        31
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK  (0xFFFFFFFF <<  I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSHI(_i)                (0x00019E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX          31
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK  (0xFFFF <<  I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSLO(_i)                (0x00019E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX          31
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSHI(_i)                 (0x0001A004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX           31
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK  (0xFFFF <<  I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSLO(_i)                 (0x0001A000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX           31
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSHI(_i)                 (0x0001A204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX           31
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK  (0xFFFF <<  I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSLO(_i)                 (0x0001A200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX           31
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXNOROUTE(_i)                (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX          31
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK  (0xFFFFFF <<  I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSHI(_i)               (0x00019A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX         31
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK  (0xFFFF <<  I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSLO(_i)               (0x00019A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX         31
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSHI(_i)               (0x00019C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX         31
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK  (0xFFFF <<  I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSLO(_i)               (0x00019C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX         31
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXDISCARD(_i)                (0x00019200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX          31
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK  (0xFFFFFFFF <<  I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSHI(_i)                (0x00019404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX          31
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK  (0xFFFF <<  I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSLO(_i)                (0x00019400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX          31
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSHI(_i)                 (0x00019604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX           31
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK  (0xFFFF <<  I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSLO(_i)                 (0x00019600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX           31
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSHI(_i)                 (0x00019804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX           31
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK  (0xFFFF <<  I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSLO(_i)                 (0x00019800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX           31
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSHI(_i)               (0x00018E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX         31
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK  (0xFFFF <<  I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSLO(_i)               (0x00018E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX         31
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSHI(_i)               (0x00019004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX         31
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK  (0xFFFF <<  I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSLO(_i)               (0x00019000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX         31
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXTRUNC(_i)              (0x00019300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX        31
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK  (0xFFFFFFFF <<  I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSHI(_i)                (0x0001A804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX          31
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK  (0xFFFF <<  I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSLO(_i)                (0x0001A800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX          31
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSHI(_i)                 (0x0001AA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX           31
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK  (0xFFFF <<  I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSLO(_i)                 (0x0001AA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX           31
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSHI(_i)                 (0x0001AC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX           31
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK  (0xFFFF <<  I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSLO(_i)                 (0x0001AC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX           31
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXNOROUTE(_i)                (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX          31
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK  (0xFFFFFF <<  I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSHI(_i)               (0x0001A404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX         31
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK  (0xFFFF <<  I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSLO(_i)               (0x0001A400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX         31
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSHI(_i)               (0x0001A604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX         31
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK  (0xFFFF <<  I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSLO(_i)               (0x0001A600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX         31
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSHI(_i)               (0x0001BE04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX         31
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK  (0xFFFF <<  I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSLO(_i)               (0x0001BE00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX         31
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSHI(_i)                (0x0001C004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX          31
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK  (0xFFFF <<  I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSLO(_i)                (0x0001C000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX          31
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSHI(_i)               (0x0001BC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX         31
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK  (0xFFFF <<  I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSLO(_i)               (0x0001BC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX         31
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSHI(_i)               (0x0001C404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX         31
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK  (0xFFFF <<  I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSLO(_i)               (0x0001C400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX         31
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSHI(_i)                (0x0001C604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX          31
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK  (0xFFFF <<  I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSLO(_i)                (0x0001C600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX          31
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSHI(_i)               (0x0001C204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX         31
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK  (0xFFFF <<  I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSLO(_i)               (0x0001C200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX         31
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDHI(_i)              (0x0001C804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX        31
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK  (0xFFFFFFFF <<  I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDLO(_i)              (0x0001C800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX        31
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_VFRDMAVINVHI(_i)              (0x0001CA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX        31
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK  (0xFFFFFFFF <<  I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_VFRDMAVINVLO(_i)              (0x0001CA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX        31
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_VFRXVLANERR(_i)             (0x00018000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRXVLANERR_MAX_INDEX       31
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK  (0xFFFFFF <<  I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_VFTCPRTXSEG(_i)             (0x0001B600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX       31
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK  (0xFFFFFFFF <<  I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_VFTCPRXOPTERR(_i)               (0x0001B200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX         31
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK  (0xFFFFFF <<  I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_VFTCPRXPROTOERR(_i)                 (0x0001B300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX           31
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK  (0xFFFFFF <<  I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSHI(_i)               (0x0001B004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX         31
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK  (0xFFFF <<  I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSLO(_i)               (0x0001B000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX         31
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGHI(_i)              (0x0001B404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX        31
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK  (0xFFFF <<  I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGLO(_i)              (0x0001B400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX        31
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSHI(_i)               (0x0001B804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX         31
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK  (0xFFFF <<  I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSLO(_i)               (0x0001B800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX         31
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSHI(_i)               (0x0001BA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX         31
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK  (0xFFFF <<  I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSLO(_i)               (0x0001BA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX         31
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK  (0xFFFFFFFF <<  I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+
+#define I40E_VFPE_AEQALLOC1               0x0000A400 /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK  (0xFFFFFFFF <<  I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH1                  0x00009800 /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK  (0xFFFFFFFF <<  I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW1                 0x0000AC00 /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK  (0xFFFFFFFF <<  I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1                   0x0000B800 /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT   0
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK    (0x1 <<  I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK  (0x7 <<  I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK  (0x3F <<  I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT    31
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK     (0x1 <<  I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK1              0x0000B000 /* Reset: VFR */
+#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK1_PECQID_MASK  (0x1FFFF <<  I40E_VFPE_CQACK1_PECQID_SHIFT)
+#define I40E_VFPE_CQARM1              0x0000B400 /* Reset: VFR */
+#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM1_PECQID_MASK  (0x1FFFF <<  I40E_VFPE_CQARM1_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB1              0x0000BC00 /* Reset: VFR */
+#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB1_WQHEAD_MASK  (0x7FF <<  I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES1                      0x00009C00 /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK  (0xFFFF <<  I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK  (0xFFFF <<  I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL1                  0x0000A000 /* Reset: VFR */
+#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT     0
+#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK      (0x7FF <<  I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK  (0x1 <<  I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG01                        0x00008C00 /* Reset: VFR */
+#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT           0
+#define I40E_VFPE_IPCONFIG01_PEIPID_MASK            (0xFFFF <<  I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK  (0x1 <<  I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK1                       0x00009000 /* Reset: VFR */
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK  (0x1F <<  I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR1                        0x00009400 /* Reset: VFR */
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK  (0xFFFFFF <<  I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER1               0x0000A800 /* Reset: VFR */
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK  (0xFFFFFFFF <<  I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC1                      0x0000C000 /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT         0
+#define I40E_VFPE_WQEALLOC1_PEQPID_MASK          (0x3FFFF <<  I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK  (0xFFF <<  I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
+#endif /* I40IW_REGISTER_H */
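
The 64-bit protocol-engine statistics defined above are exposed as LO/HI register pairs: the LO register carries the lower 32 bits of a counter and the HI register carries the remaining upper bits (16 or 24 bits wide, as the masks indicate). A minimal sketch of combining one such pair follows; the helper name and the use of readl() against a mapped BAR are assumptions for illustration, not the accessors this patch actually adds.

#include <linux/io.h>
#include <linux/types.h>

/* Illustrative sketch only: read one PF IPv6 TX octet counter from its
 * LO/HI register pair defined in i40iw_register.h. The mapped register
 * base (hw_addr) and the helper name are hypothetical.
 */
static u64 read_pf_ip6_tx_octets(void __iomem *hw_addr, unsigned int pf)
{
	u32 lo = readl(hw_addr + I40E_GLPES_PFIP6TXOCTSLO(pf));
	u32 hi = readl(hw_addr + I40E_GLPES_PFIP6TXOCTSHI(pf)) &
		 I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK;

	/* HI holds the upper bits of the 64-bit counter, LO the lower 32. */
	return ((u64)hi << 32) | lo;
}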
diff --git a/drivers/infiniband/hw/i40iw/i40iw_status.h b/drivers/infiniband/hw/i40iw/i40iw_status.h
new file mode 100644 (file)
index 0000000..b0110c1
--- /dev/null
@@ -0,0 +1,100 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses.  You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+*   Redistribution and use in source and binary forms, with or
+*   without modification, are permitted provided that the following
+*   conditions are met:
+*
+*    - Redistributions of source code must retain the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer.
+*
+*    - Redistributions in binary form must reproduce the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer in the documentation and/or other materials
+*      provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_STATUS_H
+#define I40IW_STATUS_H
+
+/* Error Codes */
+enum i40iw_status_code {
+       I40IW_SUCCESS = 0,
+       I40IW_ERR_NVM = -1,
+       I40IW_ERR_NVM_CHECKSUM = -2,
+       I40IW_ERR_CONFIG = -4,
+       I40IW_ERR_PARAM = -5,
+       I40IW_ERR_DEVICE_NOT_SUPPORTED = -6,
+       I40IW_ERR_RESET_FAILED = -7,
+       I40IW_ERR_SWFW_SYNC = -8,
+       I40IW_ERR_NO_MEMORY = -9,
+       I40IW_ERR_BAD_PTR = -10,
+       I40IW_ERR_INVALID_PD_ID = -11,
+       I40IW_ERR_INVALID_QP_ID = -12,
+       I40IW_ERR_INVALID_CQ_ID = -13,
+       I40IW_ERR_INVALID_CEQ_ID = -14,
+       I40IW_ERR_INVALID_AEQ_ID = -15,
+       I40IW_ERR_INVALID_SIZE = -16,
+       I40IW_ERR_INVALID_ARP_INDEX = -17,
+       I40IW_ERR_INVALID_FPM_FUNC_ID = -18,
+       I40IW_ERR_QP_INVALID_MSG_SIZE = -19,
+       I40IW_ERR_QP_TOOMANY_WRS_POSTED = -20,
+       I40IW_ERR_INVALID_FRAG_COUNT = -21,
+       I40IW_ERR_QUEUE_EMPTY = -22,
+       I40IW_ERR_INVALID_ALIGNMENT = -23,
+       I40IW_ERR_FLUSHED_QUEUE = -24,
+       I40IW_ERR_INVALID_PUSH_PAGE_INDEX = -25,
+       I40IW_ERR_INVALID_IMM_DATA_SIZE = -26,
+       I40IW_ERR_TIMEOUT = -27,
+       I40IW_ERR_OPCODE_MISMATCH = -28,
+       I40IW_ERR_CQP_COMPL_ERROR = -29,
+       I40IW_ERR_INVALID_VF_ID = -30,
+       I40IW_ERR_INVALID_HMCFN_ID = -31,
+       I40IW_ERR_BACKING_PAGE_ERROR = -32,
+       I40IW_ERR_NO_PBLCHUNKS_AVAILABLE = -33,
+       I40IW_ERR_INVALID_PBLE_INDEX = -34,
+       I40IW_ERR_INVALID_SD_INDEX = -35,
+       I40IW_ERR_INVALID_PAGE_DESC_INDEX = -36,
+       I40IW_ERR_INVALID_SD_TYPE = -37,
+       I40IW_ERR_MEMCPY_FAILED = -38,
+       I40IW_ERR_INVALID_HMC_OBJ_INDEX = -39,
+       I40IW_ERR_INVALID_HMC_OBJ_COUNT = -40,
+       I40IW_ERR_INVALID_SRQ_ARM_LIMIT = -41,
+       I40IW_ERR_SRQ_ENABLED = -42,
+       I40IW_ERR_BUF_TOO_SHORT = -43,
+       I40IW_ERR_BAD_IWARP_CQE = -44,
+       I40IW_ERR_NVM_BLANK_MODE = -45,
+       I40IW_ERR_NOT_IMPLEMENTED = -46,
+       I40IW_ERR_PE_DOORBELL_NOT_ENABLED = -47,
+       I40IW_ERR_NOT_READY = -48,
+       I40IW_NOT_SUPPORTED = -49,
+       I40IW_ERR_FIRMWARE_API_VERSION = -50,
+       I40IW_ERR_RING_FULL = -51,
+       I40IW_ERR_MPA_CRC = -61,
+       I40IW_ERR_NO_TXBUFS = -62,
+       I40IW_ERR_SEQ_NUM = -63,
+       I40IW_ERR_list_empty = -64,
+       I40IW_ERR_INVALID_MAC_ADDR = -65,
+       I40IW_ERR_BAD_STAG      = -66,
+       I40IW_ERR_CQ_COMPL_ERROR = -67,
+
+};
+#endif
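
Every value in the enum above except I40IW_SUCCESS (0) is negative, so callers can treat the return type as a simple success/failure indicator and map specific codes to kernel errnos where useful. A hedged sketch of that pattern is below; the function name is made up for illustration and is not part of this patch.

#include <linux/errno.h>
/* assumes i40iw_status.h (the enum above) is included */

/* Hypothetical helper: translate a driver status code into a kernel errno. */
static int i40iw_status_to_errno_sketch(enum i40iw_status_code ret)
{
	if (ret == I40IW_SUCCESS)
		return 0;
	if (ret == I40IW_ERR_NO_MEMORY)
		return -ENOMEM;
	if (ret == I40IW_ERR_TIMEOUT)
		return -ETIMEDOUT;
	return -EIO;	/* any other non-zero status is a generic I/O error */
}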
diff --git a/drivers/infiniband/hw/i40iw/i40iw_type.h b/drivers/infiniband/hw/i40iw/i40iw_type.h
new file mode 100644 (file)
index 0000000..edb3a8c
--- /dev/null
@@ -0,0 +1,1312 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses.  You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+*   Redistribution and use in source and binary forms, with or
+*   without modification, are permitted provided that the following
+*   conditions are met:
+*
+*    - Redistributions of source code must retain the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer.
+*
+*    - Redistributions in binary form must reproduce the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer in the documentation and/or other materials
+*      provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_TYPE_H
+#define I40IW_TYPE_H
+#include "i40iw_user.h"
+#include "i40iw_hmc.h"
+#include "i40iw_vf.h"
+#include "i40iw_virtchnl.h"
+
+struct i40iw_cqp_sq_wqe {
+       u64 buf[I40IW_CQP_WQE_SIZE];
+};
+
+struct i40iw_sc_aeqe {
+       u64 buf[I40IW_AEQE_SIZE];
+};
+
+struct i40iw_ceqe {
+       u64 buf[I40IW_CEQE_SIZE];
+};
+
+struct i40iw_cqp_ctx {
+       u64 buf[I40IW_CQP_CTX_SIZE];
+};
+
+struct i40iw_cq_shadow_area {
+       u64 buf[I40IW_SHADOW_AREA_SIZE];
+};
+
+struct i40iw_sc_dev;
+struct i40iw_hmc_info;
+struct i40iw_dev_pestat;
+
+struct i40iw_cqp_ops;
+struct i40iw_ccq_ops;
+struct i40iw_ceq_ops;
+struct i40iw_aeq_ops;
+struct i40iw_mr_ops;
+struct i40iw_cqp_misc_ops;
+struct i40iw_pd_ops;
+struct i40iw_priv_qp_ops;
+struct i40iw_priv_cq_ops;
+struct i40iw_hmc_ops;
+
+enum i40iw_resource_indicator_type {
+       I40IW_RSRC_INDICATOR_TYPE_ADAPTER = 0,
+       I40IW_RSRC_INDICATOR_TYPE_CQ,
+       I40IW_RSRC_INDICATOR_TYPE_QP,
+       I40IW_RSRC_INDICATOR_TYPE_SRQ
+};
+
+enum i40iw_hdrct_flags {
+       DDP_LEN_FLAG = 0x80,
+       DDP_HDR_FLAG = 0x40,
+       RDMA_HDR_FLAG = 0x20
+};
+
+enum i40iw_term_layers {
+       LAYER_RDMA = 0,
+       LAYER_DDP = 1,
+       LAYER_MPA = 2
+};
+
+enum i40iw_term_error_types {
+       RDMAP_REMOTE_PROT = 1,
+       RDMAP_REMOTE_OP = 2,
+       DDP_CATASTROPHIC = 0,
+       DDP_TAGGED_BUFFER = 1,
+       DDP_UNTAGGED_BUFFER = 2,
+       DDP_LLP = 3
+};
+
+enum i40iw_term_rdma_errors {
+       RDMAP_INV_STAG = 0x00,
+       RDMAP_INV_BOUNDS = 0x01,
+       RDMAP_ACCESS = 0x02,
+       RDMAP_UNASSOC_STAG = 0x03,
+       RDMAP_TO_WRAP = 0x04,
+       RDMAP_INV_RDMAP_VER = 0x05,
+       RDMAP_UNEXPECTED_OP = 0x06,
+       RDMAP_CATASTROPHIC_LOCAL = 0x07,
+       RDMAP_CATASTROPHIC_GLOBAL = 0x08,
+       RDMAP_CANT_INV_STAG = 0x09,
+       RDMAP_UNSPECIFIED = 0xff
+};
+
+enum i40iw_term_ddp_errors {
+       DDP_CATASTROPHIC_LOCAL = 0x00,
+       DDP_TAGGED_INV_STAG = 0x00,
+       DDP_TAGGED_BOUNDS = 0x01,
+       DDP_TAGGED_UNASSOC_STAG = 0x02,
+       DDP_TAGGED_TO_WRAP = 0x03,
+       DDP_TAGGED_INV_DDP_VER = 0x04,
+       DDP_UNTAGGED_INV_QN = 0x01,
+       DDP_UNTAGGED_INV_MSN_NO_BUF = 0x02,
+       DDP_UNTAGGED_INV_MSN_RANGE = 0x03,
+       DDP_UNTAGGED_INV_MO = 0x04,
+       DDP_UNTAGGED_INV_TOO_LONG = 0x05,
+       DDP_UNTAGGED_INV_DDP_VER = 0x06
+};
+
+enum i40iw_term_mpa_errors {
+       MPA_CLOSED = 0x01,
+       MPA_CRC = 0x02,
+       MPA_MARKER = 0x03,
+       MPA_REQ_RSP = 0x04,
+};
+
+enum i40iw_flush_opcode {
+       FLUSH_INVALID = 0,
+       FLUSH_PROT_ERR,
+       FLUSH_REM_ACCESS_ERR,
+       FLUSH_LOC_QP_OP_ERR,
+       FLUSH_REM_OP_ERR,
+       FLUSH_LOC_LEN_ERR,
+       FLUSH_GENERAL_ERR,
+       FLUSH_FATAL_ERR
+};
+
+enum i40iw_term_eventtypes {
+       TERM_EVENT_QP_FATAL,
+       TERM_EVENT_QP_ACCESS_ERR
+};
+
+struct i40iw_terminate_hdr {
+       u8 layer_etype;
+       u8 error_code;
+       u8 hdrct;
+       u8 rsvd;
+};
+
+enum i40iw_debug_flag {
+       I40IW_DEBUG_NONE        = 0x00000000,
+       I40IW_DEBUG_ERR         = 0x00000001,
+       I40IW_DEBUG_INIT        = 0x00000002,
+       I40IW_DEBUG_DEV         = 0x00000004,
+       I40IW_DEBUG_CM          = 0x00000008,
+       I40IW_DEBUG_VERBS       = 0x00000010,
+       I40IW_DEBUG_PUDA        = 0x00000020,
+       I40IW_DEBUG_ILQ         = 0x00000040,
+       I40IW_DEBUG_IEQ         = 0x00000080,
+       I40IW_DEBUG_QP          = 0x00000100,
+       I40IW_DEBUG_CQ          = 0x00000200,
+       I40IW_DEBUG_MR          = 0x00000400,
+       I40IW_DEBUG_PBLE        = 0x00000800,
+       I40IW_DEBUG_WQE         = 0x00001000,
+       I40IW_DEBUG_AEQ         = 0x00002000,
+       I40IW_DEBUG_CQP         = 0x00004000,
+       I40IW_DEBUG_HMC         = 0x00008000,
+       I40IW_DEBUG_USER        = 0x00010000,
+       I40IW_DEBUG_VIRT        = 0x00020000,
+       I40IW_DEBUG_DCB         = 0x00040000,
+       I40IW_DEBUG_CQE         = 0x00800000,
+       I40IW_DEBUG_ALL         = 0xFFFFFFFF
+};
+
+enum i40iw_hw_stat_index_32b {
+       I40IW_HW_STAT_INDEX_IP4RXDISCARD = 0,
+       I40IW_HW_STAT_INDEX_IP4RXTRUNC,
+       I40IW_HW_STAT_INDEX_IP4TXNOROUTE,
+       I40IW_HW_STAT_INDEX_IP6RXDISCARD,
+       I40IW_HW_STAT_INDEX_IP6RXTRUNC,
+       I40IW_HW_STAT_INDEX_IP6TXNOROUTE,
+       I40IW_HW_STAT_INDEX_TCPRTXSEG,
+       I40IW_HW_STAT_INDEX_TCPRXOPTERR,
+       I40IW_HW_STAT_INDEX_TCPRXPROTOERR,
+       I40IW_HW_STAT_INDEX_MAX_32
+};
+
+enum i40iw_hw_stat_index_64b {
+       I40IW_HW_STAT_INDEX_IP4RXOCTS = 0,
+       I40IW_HW_STAT_INDEX_IP4RXPKTS,
+       I40IW_HW_STAT_INDEX_IP4RXFRAGS,
+       I40IW_HW_STAT_INDEX_IP4RXMCPKTS,
+       I40IW_HW_STAT_INDEX_IP4TXOCTS,
+       I40IW_HW_STAT_INDEX_IP4TXPKTS,
+       I40IW_HW_STAT_INDEX_IP4TXFRAGS,
+       I40IW_HW_STAT_INDEX_IP4TXMCPKTS,
+       I40IW_HW_STAT_INDEX_IP6RXOCTS,
+       I40IW_HW_STAT_INDEX_IP6RXPKTS,
+       I40IW_HW_STAT_INDEX_IP6RXFRAGS,
+       I40IW_HW_STAT_INDEX_IP6RXMCPKTS,
+       I40IW_HW_STAT_INDEX_IP6TXOCTS,
+       I40IW_HW_STAT_INDEX_IP6TXPKTS,
+       I40IW_HW_STAT_INDEX_IP6TXFRAGS,
+       I40IW_HW_STAT_INDEX_IP6TXMCPKTS,
+       I40IW_HW_STAT_INDEX_TCPRXSEGS,
+       I40IW_HW_STAT_INDEX_TCPTXSEG,
+       I40IW_HW_STAT_INDEX_RDMARXRDS,
+       I40IW_HW_STAT_INDEX_RDMARXSNDS,
+       I40IW_HW_STAT_INDEX_RDMARXWRS,
+       I40IW_HW_STAT_INDEX_RDMATXRDS,
+       I40IW_HW_STAT_INDEX_RDMATXSNDS,
+       I40IW_HW_STAT_INDEX_RDMATXWRS,
+       I40IW_HW_STAT_INDEX_RDMAVBND,
+       I40IW_HW_STAT_INDEX_RDMAVINV,
+       I40IW_HW_STAT_INDEX_MAX_64
+};
+
+struct i40iw_dev_hw_stat_offsets {
+       u32 stat_offset_32[I40IW_HW_STAT_INDEX_MAX_32];
+       u32 stat_offset_64[I40IW_HW_STAT_INDEX_MAX_64];
+};
+
+struct i40iw_dev_hw_stats {
+       u64 stat_value_32[I40IW_HW_STAT_INDEX_MAX_32];
+       u64 stat_value_64[I40IW_HW_STAT_INDEX_MAX_64];
+};
+
+struct i40iw_device_pestat_ops {
+       void (*iw_hw_stat_init)(struct i40iw_dev_pestat *, u8, struct i40iw_hw *, bool);
+       void (*iw_hw_stat_read_32)(struct i40iw_dev_pestat *, enum i40iw_hw_stat_index_32b, u64 *);
+       void (*iw_hw_stat_read_64)(struct i40iw_dev_pestat *, enum i40iw_hw_stat_index_64b, u64 *);
+       void (*iw_hw_stat_read_all)(struct i40iw_dev_pestat *, struct i40iw_dev_hw_stats *);
+       void (*iw_hw_stat_refresh_all)(struct i40iw_dev_pestat *);
+};
+
+struct i40iw_dev_pestat {
+       struct i40iw_hw *hw;
+       struct i40iw_device_pestat_ops ops;
+       struct i40iw_dev_hw_stats hw_stats;
+       struct i40iw_dev_hw_stats last_read_hw_stats;
+       struct i40iw_dev_hw_stat_offsets hw_stat_offsets;
+       struct timer_list stats_timer;
+       spinlock_t stats_lock; /* rdma stats lock */
+};
+
+struct i40iw_hw {
+       u8 __iomem *hw_addr;
+       void *dev_context;
+       struct i40iw_hmc_info hmc;
+};
+
+struct i40iw_pfpdu {
+       struct list_head rxlist;
+       u32 rcv_nxt;
+       u32 fps;
+       u32 max_fpdu_data;
+       bool mode;
+       bool mpa_crc_err;
+       u64 total_ieq_bufs;
+       u64 fpdu_processed;
+       u64 bad_seq_num;
+       u64 crc_err;
+       u64 no_tx_bufs;
+       u64 tx_err;
+       u64 out_of_order;
+       u64 pmode_count;
+};
+
+struct i40iw_sc_pd {
+       u32 size;
+       struct i40iw_sc_dev *dev;
+       u16 pd_id;
+};
+
+struct i40iw_cqp_quanta {
+       u64 elem[I40IW_CQP_WQE_SIZE];
+};
+
+struct i40iw_sc_cqp {
+       u32 size;
+       u64 sq_pa;
+       u64 host_ctx_pa;
+       void *back_cqp;
+       struct i40iw_sc_dev *dev;
+       enum i40iw_status_code (*process_cqp_sds)(struct i40iw_sc_dev *,
+                                                 struct i40iw_update_sds_info *);
+       struct i40iw_dma_mem sdbuf;
+       struct i40iw_ring sq_ring;
+       struct i40iw_cqp_quanta *sq_base;
+       u64 *host_ctx;
+       u64 *scratch_array;
+       u32 cqp_id;
+       u32 sq_size;
+       u32 hw_sq_size;
+       u8 struct_ver;
+       u8 polarity;
+       bool en_datacenter_tcp;
+       u8 hmc_profile;
+       u8 enabled_vf_count;
+       u8 timeout_count;
+};
+
+struct i40iw_sc_aeq {
+       u32 size;
+       u64 aeq_elem_pa;
+       struct i40iw_sc_dev *dev;
+       struct i40iw_sc_aeqe *aeqe_base;
+       void *pbl_list;
+       u32 elem_cnt;
+       struct i40iw_ring aeq_ring;
+       bool virtual_map;
+       u8 pbl_chunk_size;
+       u32 first_pm_pbl_idx;
+       u8 polarity;
+};
+
+struct i40iw_sc_ceq {
+       u32 size;
+       u64 ceq_elem_pa;
+       struct i40iw_sc_dev *dev;
+       struct i40iw_ceqe *ceqe_base;
+       void *pbl_list;
+       u32 ceq_id;
+       u32 elem_cnt;
+       struct i40iw_ring ceq_ring;
+       bool virtual_map;
+       u8 pbl_chunk_size;
+       bool tph_en;
+       u8 tph_val;
+       u32 first_pm_pbl_idx;
+       u8 polarity;
+};
+
+struct i40iw_sc_cq {
+       struct i40iw_cq_uk cq_uk;
+       u64 cq_pa;
+       u64 shadow_area_pa;
+       struct i40iw_sc_dev *dev;
+       void *pbl_list;
+       void *back_cq;
+       u32 ceq_id;
+       u32 shadow_read_threshold;
+       bool ceqe_mask;
+       bool virtual_map;
+       u8 pbl_chunk_size;
+       u8 cq_type;
+       bool ceq_id_valid;
+       bool tph_en;
+       u8 tph_val;
+       u32 first_pm_pbl_idx;
+       bool check_overflow;
+};
+
+struct i40iw_sc_qp {
+       struct i40iw_qp_uk qp_uk;
+       u64 sq_pa;
+       u64 rq_pa;
+       u64 hw_host_ctx_pa;
+       u64 shadow_area_pa;
+       u64 q2_pa;
+       struct i40iw_sc_dev *dev;
+       struct i40iw_sc_pd *pd;
+       u64 *hw_host_ctx;
+       void *llp_stream_handle;
+       void *back_qp;
+       struct i40iw_pfpdu pfpdu;
+       u8 *q2_buf;
+       u64 qp_compl_ctx;
+       u16 qs_handle;
+       u16 exception_lan_queue;
+       u16 push_idx;
+       u8 sq_tph_val;
+       u8 rq_tph_val;
+       u8 qp_state;
+       u8 qp_type;
+       u8 hw_sq_size;
+       u8 hw_rq_size;
+       u8 src_mac_addr_idx;
+       bool sq_tph_en;
+       bool rq_tph_en;
+       bool rcv_tph_en;
+       bool xmit_tph_en;
+       bool virtual_map;
+       bool flush_sq;
+       bool flush_rq;
+       bool sq_flush;
+       enum i40iw_flush_opcode flush_code;
+       enum i40iw_term_eventtypes eventtype;
+       u8 term_flags;
+};
+
+struct i40iw_hmc_fpm_misc {
+       u32 max_ceqs;
+       u32 max_sds;
+       u32 xf_block_size;
+       u32 q1_block_size;
+       u32 ht_multiplier;
+       u32 timer_bucket;
+};
+
+struct i40iw_vchnl_if {
+       enum i40iw_status_code (*vchnl_recv)(struct i40iw_sc_dev *, u32, u8 *, u16);
+       enum i40iw_status_code (*vchnl_send)(struct i40iw_sc_dev *dev, u32, u8 *, u16);
+};
+
+#define I40IW_VCHNL_MAX_VF_MSG_SIZE 512
+
+struct i40iw_vchnl_vf_msg_buffer {
+       struct i40iw_virtchnl_op_buf vchnl_msg;
+       char parm_buffer[I40IW_VCHNL_MAX_VF_MSG_SIZE - 1];
+};
+
+struct i40iw_vfdev {
+       struct i40iw_sc_dev *pf_dev;
+       u8 *hmc_info_mem;
+       struct i40iw_dev_pestat dev_pestat;
+       struct i40iw_hmc_pble_info *pble_info;
+       struct i40iw_hmc_info hmc_info;
+       struct i40iw_vchnl_vf_msg_buffer vf_msg_buffer;
+       u64 fpm_query_buf_pa;
+       u64 *fpm_query_buf;
+       u32 vf_id;
+       u32 msg_count;
+       bool pf_hmc_initialized;
+       u16 pmf_index;
+       u16 iw_vf_idx;          /* VF Device table index */
+       bool stats_initialized;
+};
+
+struct i40iw_sc_dev {
+       struct list_head cqp_cmd_head;  /* head of the CQP command list */
+       spinlock_t cqp_lock; /* cqp list sync */
+       struct i40iw_dev_uk dev_uk;
+       struct i40iw_dev_pestat dev_pestat;
+       struct i40iw_dma_mem vf_fpm_query_buf[I40IW_MAX_PE_ENABLED_VF_COUNT];
+       u64 fpm_query_buf_pa;
+       u64 fpm_commit_buf_pa;
+       u64 *fpm_query_buf;
+       u64 *fpm_commit_buf;
+       void *back_dev;
+       struct i40iw_hw *hw;
+       u8 __iomem *db_addr;
+       struct i40iw_hmc_info *hmc_info;
+       struct i40iw_hmc_pble_info *pble_info;
+       struct i40iw_vfdev *vf_dev[I40IW_MAX_PE_ENABLED_VF_COUNT];
+       struct i40iw_sc_cqp *cqp;
+       struct i40iw_sc_aeq *aeq;
+       struct i40iw_sc_ceq *ceq[I40IW_CEQ_MAX_COUNT];
+       struct i40iw_sc_cq *ccq;
+       struct i40iw_cqp_ops *cqp_ops;
+       struct i40iw_ccq_ops *ccq_ops;
+       struct i40iw_ceq_ops *ceq_ops;
+       struct i40iw_aeq_ops *aeq_ops;
+       struct i40iw_pd_ops *iw_pd_ops;
+       struct i40iw_priv_qp_ops *iw_priv_qp_ops;
+       struct i40iw_priv_cq_ops *iw_priv_cq_ops;
+       struct i40iw_mr_ops *mr_ops;
+       struct i40iw_cqp_misc_ops *cqp_misc_ops;
+       struct i40iw_hmc_ops *hmc_ops;
+       struct i40iw_vchnl_if vchnl_if;
+       u32 ilq_count;
+       struct i40iw_virt_mem ilq_mem;
+       struct i40iw_puda_rsrc *ilq;
+       u32 ieq_count;
+       struct i40iw_virt_mem ieq_mem;
+       struct i40iw_puda_rsrc *ieq;
+
+       struct i40iw_vf_cqp_ops *iw_vf_cqp_ops;
+
+       struct i40iw_hmc_fpm_misc hmc_fpm_misc;
+       u16 qs_handle;
+       u32     debug_mask;
+       u16 exception_lan_queue;
+       u8 hmc_fn_id;
+       bool is_pf;
+       bool vchnl_up;
+       u8 vf_id;
+       u64 cqp_cmd_stats[OP_SIZE_CQP_STAT_ARRAY];
+       struct i40iw_vchnl_vf_msg_buffer vchnl_vf_msg_buf;
+       u8 hw_rev;
+};
+
+struct i40iw_modify_cq_info {
+       u64 cq_pa;
+       struct i40iw_cqe *cq_base;
+       void *pbl_list;
+       u32 ceq_id;
+       u32 cq_size;
+       u32 shadow_read_threshold;
+       bool virtual_map;
+       u8 pbl_chunk_size;
+       bool check_overflow;
+       bool cq_resize;
+       bool ceq_change;
+       bool check_overflow_change;
+       u32 first_pm_pbl_idx;
+       bool ceq_valid;
+};
+
+struct i40iw_create_qp_info {
+       u8 next_iwarp_state;
+       bool ord_valid;
+       bool tcp_ctx_valid;
+       bool cq_num_valid;
+       bool static_rsrc;
+       bool arp_cache_idx_valid;
+};
+
+struct i40iw_modify_qp_info {
+       u64 rx_win0;
+       u64 rx_win1;
+       u16 new_mss;
+       u8 next_iwarp_state;
+       u8 termlen;
+       bool ord_valid;
+       bool tcp_ctx_valid;
+       bool cq_num_valid;
+       bool static_rsrc;
+       bool arp_cache_idx_valid;
+       bool reset_tcp_conn;
+       bool remove_hash_idx;
+       bool dont_send_term;
+       bool dont_send_fin;
+       bool cached_var_valid;
+       bool mss_change;
+       bool force_loopback;
+};
+
+struct i40iw_ccq_cqe_info {
+       struct i40iw_sc_cqp *cqp;
+       u64 scratch;
+       u32 op_ret_val;
+       u16 maj_err_code;
+       u16 min_err_code;
+       u8 op_code;
+       bool error;
+};
+
+struct i40iw_l2params {
+       u16 qs_handle_list[I40IW_MAX_USER_PRIORITY];
+       u16 mss;
+};
+
+struct i40iw_device_init_info {
+       u64 fpm_query_buf_pa;
+       u64 fpm_commit_buf_pa;
+       u64 *fpm_query_buf;
+       u64 *fpm_commit_buf;
+       struct i40iw_hw *hw;
+       void __iomem *bar0;
+       enum i40iw_status_code (*vchnl_send)(struct i40iw_sc_dev *, u32, u8 *, u16);
+       u16 qs_handle;
+       u16 exception_lan_queue;
+       u8 hmc_fn_id;
+       bool is_pf;
+       u32 debug_mask;
+};
+
+enum i40iw_cqp_hmc_profile {
+       I40IW_HMC_PROFILE_DEFAULT = 1,
+       I40IW_HMC_PROFILE_FAVOR_VF = 2,
+       I40IW_HMC_PROFILE_EQUAL = 3,
+};
+
+struct i40iw_cqp_init_info {
+       u64 cqp_compl_ctx;
+       u64 host_ctx_pa;
+       u64 sq_pa;
+       struct i40iw_sc_dev *dev;
+       struct i40iw_cqp_quanta *sq;
+       u64 *host_ctx;
+       u64 *scratch_array;
+       u32 sq_size;
+       u8 struct_ver;
+       bool en_datacenter_tcp;
+       u8 hmc_profile;
+       u8 enabled_vf_count;
+};
+
+struct i40iw_ceq_init_info {
+       u64 ceqe_pa;
+       struct i40iw_sc_dev *dev;
+       u64 *ceqe_base;
+       void *pbl_list;
+       u32 elem_cnt;
+       u32 ceq_id;
+       bool virtual_map;
+       u8 pbl_chunk_size;
+       bool tph_en;
+       u8 tph_val;
+       u32 first_pm_pbl_idx;
+};
+
+struct i40iw_aeq_init_info {
+       u64 aeq_elem_pa;
+       struct i40iw_sc_dev *dev;
+       u32 *aeqe_base;
+       void *pbl_list;
+       u32 elem_cnt;
+       bool virtual_map;
+       u8 pbl_chunk_size;
+       u32 first_pm_pbl_idx;
+};
+
+struct i40iw_ccq_init_info {
+       u64 cq_pa;
+       u64 shadow_area_pa;
+       struct i40iw_sc_dev *dev;
+       struct i40iw_cqe *cq_base;
+       u64 *shadow_area;
+       void *pbl_list;
+       u32 num_elem;
+       u32 ceq_id;
+       u32 shadow_read_threshold;
+       bool ceqe_mask;
+       bool ceq_id_valid;
+       bool tph_en;
+       u8 tph_val;
+       bool avoid_mem_cflct;
+       bool virtual_map;
+       u8 pbl_chunk_size;
+       u32 first_pm_pbl_idx;
+};
+
+struct i40iwarp_offload_info {
+       u16 rcv_mark_offset;
+       u16 snd_mark_offset;
+       u16 pd_id;
+       u8 ddp_ver;
+       u8 rdmap_ver;
+       u8 ord_size;
+       u8 ird_size;
+       bool wr_rdresp_en;
+       bool rd_enable;
+       bool snd_mark_en;
+       bool rcv_mark_en;
+       bool bind_en;
+       bool fast_reg_en;
+       bool priv_mode_en;
+       bool lsmm_present;
+       u8 iwarp_mode;
+       bool align_hdrs;
+       bool rcv_no_mpa_crc;
+
+       u8 last_byte_sent;
+};
+
+struct i40iw_tcp_offload_info {
+       bool ipv4;
+       bool no_nagle;
+       bool insert_vlan_tag;
+       bool time_stamp;
+       u8 cwnd_inc_limit;
+       bool drop_ooo_seg;
+       bool dup_ack_thresh;
+       u8 ttl;
+       u8 src_mac_addr_idx;
+       bool avoid_stretch_ack;
+       u8 tos;
+       u16 src_port;
+       u16 dst_port;
+       u32 dest_ip_addr0;
+       u32 dest_ip_addr1;
+       u32 dest_ip_addr2;
+       u32 dest_ip_addr3;
+       u32 snd_mss;
+       u16 vlan_tag;
+       u16 arp_idx;
+       u32 flow_label;
+       bool wscale;
+       u8 tcp_state;
+       u8 snd_wscale;
+       u8 rcv_wscale;
+       u32 time_stamp_recent;
+       u32 time_stamp_age;
+       u32 snd_nxt;
+       u32 snd_wnd;
+       u32 rcv_nxt;
+       u32 rcv_wnd;
+       u32 snd_max;
+       u32 snd_una;
+       u32 srtt;
+       u32 rtt_var;
+       u32 ss_thresh;
+       u32 cwnd;
+       u32 snd_wl1;
+       u32 snd_wl2;
+       u32 max_snd_window;
+       u8 rexmit_thresh;
+       u32 local_ipaddr0;
+       u32 local_ipaddr1;
+       u32 local_ipaddr2;
+       u32 local_ipaddr3;
+       bool ignore_tcp_opt;
+       bool ignore_tcp_uns_opt;
+};
+
+struct i40iw_qp_host_ctx_info {
+       u64 qp_compl_ctx;
+       struct i40iw_tcp_offload_info *tcp_info;
+       struct i40iwarp_offload_info *iwarp_info;
+       u32 send_cq_num;
+       u32 rcv_cq_num;
+       u16 push_idx;
+       bool push_mode_en;
+       bool tcp_info_valid;
+       bool iwarp_info_valid;
+       bool err_rq_idx_valid;
+       u16 err_rq_idx;
+};
+
+struct i40iw_aeqe_info {
+       u64 compl_ctx;
+       u32 qp_cq_id;
+       u16 ae_id;
+       u16 wqe_idx;
+       u8 tcp_state;
+       u8 iwarp_state;
+       bool qp;
+       bool cq;
+       bool sq;
+       bool in_rdrsp_wr;
+       bool out_rdrsp;
+       u8 q2_data_written;
+       bool aeqe_overflow;
+};
+
+struct i40iw_allocate_stag_info {
+       u64 total_len;
+       u32 chunk_size;
+       u32 stag_idx;
+       u32 page_size;
+       u16 pd_id;
+       u16 access_rights;
+       bool remote_access;
+       bool use_hmc_fcn_index;
+       u8 hmc_fcn_index;
+       bool use_pf_rid;
+};
+
+struct i40iw_reg_ns_stag_info {
+       u64 reg_addr_pa;
+       u64 fbo;
+       void *va;
+       u64 total_len;
+       u32 page_size;
+       u32 chunk_size;
+       u32 first_pm_pbl_index;
+       enum i40iw_addressing_type addr_type;
+       i40iw_stag_index stag_idx;
+       u16 access_rights;
+       u16 pd_id;
+       i40iw_stag_key stag_key;
+       bool use_hmc_fcn_index;
+       u8 hmc_fcn_index;
+       bool use_pf_rid;
+};
+
+struct i40iw_fast_reg_stag_info {
+       u64 wr_id;
+       u64 reg_addr_pa;
+       u64 fbo;
+       void *va;
+       u64 total_len;
+       u32 page_size;
+       u32 chunk_size;
+       u32 first_pm_pbl_index;
+       enum i40iw_addressing_type addr_type;
+       i40iw_stag_index stag_idx;
+       u16 access_rights;
+       u16 pd_id;
+       i40iw_stag_key stag_key;
+       bool local_fence;
+       bool read_fence;
+       bool signaled;
+       bool use_hmc_fcn_index;
+       u8 hmc_fcn_index;
+       bool use_pf_rid;
+       bool defer_flag;
+};
+
+struct i40iw_dealloc_stag_info {
+       u32 stag_idx;
+       u16 pd_id;
+       bool mr;
+       bool dealloc_pbl;
+};
+
+struct i40iw_register_shared_stag {
+       void *va;
+       enum i40iw_addressing_type addr_type;
+       i40iw_stag_index new_stag_idx;
+       i40iw_stag_index parent_stag_idx;
+       u32 access_rights;
+       u16 pd_id;
+       i40iw_stag_key new_stag_key;
+};
+
+struct i40iw_qp_init_info {
+       struct i40iw_qp_uk_init_info qp_uk_init_info;
+       struct i40iw_sc_pd *pd;
+       u64 *host_ctx;
+       u8 *q2;
+       u64 sq_pa;
+       u64 rq_pa;
+       u64 host_ctx_pa;
+       u64 q2_pa;
+       u64 shadow_area_pa;
+       u8 sq_tph_val;
+       u8 rq_tph_val;
+       u8 type;
+       bool sq_tph_en;
+       bool rq_tph_en;
+       bool rcv_tph_en;
+       bool xmit_tph_en;
+       bool virtual_map;
+};
+
+struct i40iw_cq_init_info {
+       struct i40iw_sc_dev *dev;
+       u64 cq_base_pa;
+       u64 shadow_area_pa;
+       u32 ceq_id;
+       u32 shadow_read_threshold;
+       bool virtual_map;
+       bool ceqe_mask;
+       u8 pbl_chunk_size;
+       u32 first_pm_pbl_idx;
+       bool ceq_id_valid;
+       bool tph_en;
+       u8 tph_val;
+       u8 type;
+       struct i40iw_cq_uk_init_info cq_uk_init_info;
+};
+
+struct i40iw_upload_context_info {
+       u64 buf_pa;
+       bool freeze_qp;
+       bool raw_format;
+       u32 qp_id;
+       u8 qp_type;
+};
+
+struct i40iw_add_arp_cache_entry_info {
+       u8 mac_addr[6];
+       u32 reach_max;
+       u16 arp_index;
+       bool permanent;
+};
+
+struct i40iw_apbvt_info {
+       u16 port;
+       bool add;
+};
+
+enum i40iw_quad_entry_type {
+       I40IW_QHASH_TYPE_TCP_ESTABLISHED = 1,
+       I40IW_QHASH_TYPE_TCP_SYN,
+};
+
+enum i40iw_quad_hash_manage_type {
+       I40IW_QHASH_MANAGE_TYPE_DELETE = 0,
+       I40IW_QHASH_MANAGE_TYPE_ADD,
+       I40IW_QHASH_MANAGE_TYPE_MODIFY
+};
+
+struct i40iw_qhash_table_info {
+       enum i40iw_quad_hash_manage_type manage;
+       enum i40iw_quad_entry_type entry_type;
+       bool vlan_valid;
+       bool ipv4_valid;
+       u8 mac_addr[6];
+       u16 vlan_id;
+       u16 qs_handle;
+       u32 qp_num;
+       u32 dest_ip[4];
+       u32 src_ip[4];
+       u32 dest_port;
+       u32 src_port;
+};
+
+struct i40iw_local_mac_ipaddr_entry_info {
+       u8 mac_addr[6];
+       u8 entry_idx;
+};
+
+struct i40iw_cqp_manage_push_page_info {
+       u32 push_idx;
+       u16 qs_handle;
+       u8 free_page;
+};
+
+struct i40iw_qp_flush_info {
+       u16 sq_minor_code;
+       u16 sq_major_code;
+       u16 rq_minor_code;
+       u16 rq_major_code;
+       u16 ae_code;
+       u8 ae_source;
+       bool sq;
+       bool rq;
+       bool userflushcode;
+       bool generate_ae;
+};
+
+struct i40iw_cqp_commit_fpm_values {
+       u64 qp_base;
+       u64 cq_base;
+       u32 hte_base;
+       u32 arp_base;
+       u32 apbvt_inuse_base;
+       u32 mr_base;
+       u32 xf_base;
+       u32 xffl_base;
+       u32 q1_base;
+       u32 q1fl_base;
+       u32 fsimc_base;
+       u32 fsiav_base;
+       u32 pbl_base;
+
+       u32 qp_cnt;
+       u32 cq_cnt;
+       u32 hte_cnt;
+       u32 arp_cnt;
+       u32 mr_cnt;
+       u32 xf_cnt;
+       u32 xffl_cnt;
+       u32 q1_cnt;
+       u32 q1fl_cnt;
+       u32 fsimc_cnt;
+       u32 fsiav_cnt;
+       u32 pbl_cnt;
+};
+
+struct i40iw_cqp_query_fpm_values {
+       u16 first_pe_sd_index;
+       u32 qp_objsize;
+       u32 cq_objsize;
+       u32 hte_objsize;
+       u32 arp_objsize;
+       u32 mr_objsize;
+       u32 xf_objsize;
+       u32 q1_objsize;
+       u32 fsimc_objsize;
+       u32 fsiav_objsize;
+
+       u32 qp_max;
+       u32 cq_max;
+       u32 hte_max;
+       u32 arp_max;
+       u32 mr_max;
+       u32 xf_max;
+       u32 xffl_max;
+       u32 q1_max;
+       u32 q1fl_max;
+       u32 fsimc_max;
+       u32 fsiav_max;
+       u32 pbl_max;
+};
+
+struct i40iw_cqp_ops {
+       enum i40iw_status_code (*cqp_init)(struct i40iw_sc_cqp *,
+                                          struct i40iw_cqp_init_info *);
+       enum i40iw_status_code (*cqp_create)(struct i40iw_sc_cqp *, bool, u16 *, u16 *);
+       void (*cqp_post_sq)(struct i40iw_sc_cqp *);
+       u64 *(*cqp_get_next_send_wqe)(struct i40iw_sc_cqp *, u64 scratch);
+       enum i40iw_status_code (*cqp_destroy)(struct i40iw_sc_cqp *);
+       enum i40iw_status_code (*poll_for_cqp_op_done)(struct i40iw_sc_cqp *, u8,
+                                                      struct i40iw_ccq_cqe_info *);
+};
+
+struct i40iw_ccq_ops {
+       enum i40iw_status_code (*ccq_init)(struct i40iw_sc_cq *,
+                                          struct i40iw_ccq_init_info *);
+       enum i40iw_status_code (*ccq_create)(struct i40iw_sc_cq *, u64, bool, bool);
+       enum i40iw_status_code (*ccq_destroy)(struct i40iw_sc_cq *, u64, bool);
+       enum i40iw_status_code (*ccq_create_done)(struct i40iw_sc_cq *);
+       enum i40iw_status_code (*ccq_get_cqe_info)(struct i40iw_sc_cq *,
+                                                  struct i40iw_ccq_cqe_info *);
+       void (*ccq_arm)(struct i40iw_sc_cq *);
+};
+
+struct i40iw_ceq_ops {
+       enum i40iw_status_code (*ceq_init)(struct i40iw_sc_ceq *,
+                                          struct i40iw_ceq_init_info *);
+       enum i40iw_status_code (*ceq_create)(struct i40iw_sc_ceq *, u64, bool);
+       enum i40iw_status_code (*cceq_create_done)(struct i40iw_sc_ceq *);
+       enum i40iw_status_code (*cceq_destroy_done)(struct i40iw_sc_ceq *);
+       enum i40iw_status_code (*cceq_create)(struct i40iw_sc_ceq *, u64);
+       enum i40iw_status_code (*ceq_destroy)(struct i40iw_sc_ceq *, u64, bool);
+       void *(*process_ceq)(struct i40iw_sc_dev *, struct i40iw_sc_ceq *);
+};
+
+struct i40iw_aeq_ops {
+       enum i40iw_status_code (*aeq_init)(struct i40iw_sc_aeq *,
+                                          struct i40iw_aeq_init_info *);
+       enum i40iw_status_code (*aeq_create)(struct i40iw_sc_aeq *, u64, bool);
+       enum i40iw_status_code (*aeq_destroy)(struct i40iw_sc_aeq *, u64, bool);
+       enum i40iw_status_code (*get_next_aeqe)(struct i40iw_sc_aeq *,
+                                               struct i40iw_aeqe_info *);
+       enum i40iw_status_code (*repost_aeq_entries)(struct i40iw_sc_dev *, u32);
+       enum i40iw_status_code (*aeq_create_done)(struct i40iw_sc_aeq *);
+       enum i40iw_status_code (*aeq_destroy_done)(struct i40iw_sc_aeq *);
+};
+
+struct i40iw_pd_ops {
+       void (*pd_init)(struct i40iw_sc_dev *, struct i40iw_sc_pd *, u16);
+};
+
+struct i40iw_priv_qp_ops {
+       enum i40iw_status_code (*qp_init)(struct i40iw_sc_qp *, struct i40iw_qp_init_info *);
+       enum i40iw_status_code (*qp_create)(struct i40iw_sc_qp *,
+                                           struct i40iw_create_qp_info *, u64, bool);
+       enum i40iw_status_code (*qp_modify)(struct i40iw_sc_qp *,
+                                           struct i40iw_modify_qp_info *, u64, bool);
+       enum i40iw_status_code (*qp_destroy)(struct i40iw_sc_qp *, u64, bool, bool, bool);
+       enum i40iw_status_code (*qp_flush_wqes)(struct i40iw_sc_qp *,
+                                               struct i40iw_qp_flush_info *, u64, bool);
+       enum i40iw_status_code (*qp_upload_context)(struct i40iw_sc_dev *,
+                                                   struct i40iw_upload_context_info *,
+                                                   u64, bool);
+       enum i40iw_status_code (*qp_setctx)(struct i40iw_sc_qp *, u64 *,
+                                           struct i40iw_qp_host_ctx_info *);
+
+       void (*qp_send_lsmm)(struct i40iw_sc_qp *, void *, u32, i40iw_stag);
+       void (*qp_send_lsmm_nostag)(struct i40iw_sc_qp *, void *, u32);
+       void (*qp_send_rtt)(struct i40iw_sc_qp *, bool);
+       enum i40iw_status_code (*qp_post_wqe0)(struct i40iw_sc_qp *, u8);
+};
+
+struct i40iw_priv_cq_ops {
+       enum i40iw_status_code (*cq_init)(struct i40iw_sc_cq *, struct i40iw_cq_init_info *);
+       enum i40iw_status_code (*cq_create)(struct i40iw_sc_cq *, u64, bool, bool);
+       enum i40iw_status_code (*cq_destroy)(struct i40iw_sc_cq *, u64, bool);
+       enum i40iw_status_code (*cq_modify)(struct i40iw_sc_cq *,
+                                           struct i40iw_modify_cq_info *, u64, bool);
+};
+
+struct i40iw_mr_ops {
+       enum i40iw_status_code (*alloc_stag)(struct i40iw_sc_dev *,
+                                            struct i40iw_allocate_stag_info *, u64, bool);
+       enum i40iw_status_code (*mr_reg_non_shared)(struct i40iw_sc_dev *,
+                                                   struct i40iw_reg_ns_stag_info *,
+                                                   u64, bool);
+       enum i40iw_status_code (*mr_reg_shared)(struct i40iw_sc_dev *,
+                                               struct i40iw_register_shared_stag *,
+                                               u64, bool);
+       enum i40iw_status_code (*dealloc_stag)(struct i40iw_sc_dev *,
+                                              struct i40iw_dealloc_stag_info *,
+                                              u64, bool);
+       enum i40iw_status_code (*query_stag)(struct i40iw_sc_dev *, u64, u32, bool);
+       enum i40iw_status_code (*mw_alloc)(struct i40iw_sc_dev *, u64, u32, u16, bool);
+};
+
+struct i40iw_cqp_misc_ops {
+       enum i40iw_status_code (*manage_push_page)(struct i40iw_sc_cqp *,
+                                                  struct i40iw_cqp_manage_push_page_info *,
+                                                  u64, bool);
+       enum i40iw_status_code (*manage_hmc_pm_func_table)(struct i40iw_sc_cqp *,
+                                                          u64, u8, bool, bool);
+       enum i40iw_status_code (*set_hmc_resource_profile)(struct i40iw_sc_cqp *,
+                                                          u64, u8, u8, bool, bool);
+       enum i40iw_status_code (*commit_fpm_values)(struct i40iw_sc_cqp *, u64, u8,
+                                                   struct i40iw_dma_mem *, bool, u8);
+       enum i40iw_status_code (*query_fpm_values)(struct i40iw_sc_cqp *, u64, u8,
+                                                  struct i40iw_dma_mem *, bool, u8);
+       enum i40iw_status_code (*static_hmc_pages_allocated)(struct i40iw_sc_cqp *,
+                                                            u64, u8, bool, bool);
+       enum i40iw_status_code (*add_arp_cache_entry)(struct i40iw_sc_cqp *,
+                                                     struct i40iw_add_arp_cache_entry_info *,
+                                                     u64, bool);
+       enum i40iw_status_code (*del_arp_cache_entry)(struct i40iw_sc_cqp *, u64, u16, bool);
+       enum i40iw_status_code (*query_arp_cache_entry)(struct i40iw_sc_cqp *, u64, u16, bool);
+       enum i40iw_status_code (*manage_apbvt_entry)(struct i40iw_sc_cqp *,
+                                                    struct i40iw_apbvt_info *, u64, bool);
+       enum i40iw_status_code (*manage_qhash_table_entry)(struct i40iw_sc_cqp *,
+                                                          struct i40iw_qhash_table_info *, u64, bool);
+       enum i40iw_status_code (*alloc_local_mac_ipaddr_table_entry)(struct i40iw_sc_cqp *, u64, bool);
+       enum i40iw_status_code (*add_local_mac_ipaddr_entry)(struct i40iw_sc_cqp *,
+                                                            struct i40iw_local_mac_ipaddr_entry_info *,
+                                                            u64, bool);
+       enum i40iw_status_code (*del_local_mac_ipaddr_entry)(struct i40iw_sc_cqp *, u64, u8, u8, bool);
+       enum i40iw_status_code (*cqp_nop)(struct i40iw_sc_cqp *, u64, bool);
+       enum i40iw_status_code (*commit_fpm_values_done)(struct i40iw_sc_cqp *);
+       enum i40iw_status_code (*query_fpm_values_done)(struct i40iw_sc_cqp *);
+       enum i40iw_status_code (*manage_hmc_pm_func_table_done)(struct i40iw_sc_cqp *);
+       enum i40iw_status_code (*update_suspend_qp)(struct i40iw_sc_cqp *, struct i40iw_sc_qp *, u64);
+       enum i40iw_status_code (*update_resume_qp)(struct i40iw_sc_cqp *, struct i40iw_sc_qp *, u64);
+};
+
+struct i40iw_hmc_ops {
+       enum i40iw_status_code (*init_iw_hmc)(struct i40iw_sc_dev *, u8);
+       enum i40iw_status_code (*parse_fpm_query_buf)(u64 *, struct i40iw_hmc_info *,
+                                                     struct i40iw_hmc_fpm_misc *);
+       enum i40iw_status_code (*configure_iw_fpm)(struct i40iw_sc_dev *, u8);
+       enum i40iw_status_code (*parse_fpm_commit_buf)(u64 *, struct i40iw_hmc_obj_info *);
+       enum i40iw_status_code (*create_hmc_object)(struct i40iw_sc_dev *dev,
+                                                   struct i40iw_hmc_create_obj_info *);
+       enum i40iw_status_code (*del_hmc_object)(struct i40iw_sc_dev *dev,
+                                                struct i40iw_hmc_del_obj_info *,
+                                                bool reset);
+       enum i40iw_status_code (*pf_init_vfhmc)(struct i40iw_sc_dev *, u8, u32 *);
+       enum i40iw_status_code (*vf_configure_vffpm)(struct i40iw_sc_dev *, u32 *);
+};
+
+struct cqp_info {
+       union {
+               struct {
+                       struct i40iw_sc_qp *qp;
+                       struct i40iw_create_qp_info info;
+                       u64 scratch;
+               } qp_create;
+
+               struct {
+                       struct i40iw_sc_qp *qp;
+                       struct i40iw_modify_qp_info info;
+                       u64 scratch;
+               } qp_modify;
+
+               struct {
+                       struct i40iw_sc_qp *qp;
+                       u64 scratch;
+                       bool remove_hash_idx;
+                       bool ignore_mw_bnd;
+               } qp_destroy;
+
+               struct {
+                       struct i40iw_sc_cq *cq;
+                       u64 scratch;
+                       bool check_overflow;
+               } cq_create;
+
+               struct {
+                       struct i40iw_sc_cq *cq;
+                       u64 scratch;
+               } cq_destroy;
+
+               struct {
+                       struct i40iw_sc_dev *dev;
+                       struct i40iw_allocate_stag_info info;
+                       u64 scratch;
+               } alloc_stag;
+
+               struct {
+                       struct i40iw_sc_dev *dev;
+                       u64 scratch;
+                       u32 mw_stag_index;
+                       u16 pd_id;
+               } mw_alloc;
+
+               struct {
+                       struct i40iw_sc_dev *dev;
+                       struct i40iw_reg_ns_stag_info info;
+                       u64 scratch;
+               } mr_reg_non_shared;
+
+               struct {
+                       struct i40iw_sc_dev *dev;
+                       struct i40iw_dealloc_stag_info info;
+                       u64 scratch;
+               } dealloc_stag;
+
+               struct {
+                       struct i40iw_sc_cqp *cqp;
+                       struct i40iw_local_mac_ipaddr_entry_info info;
+                       u64 scratch;
+               } add_local_mac_ipaddr_entry;
+
+               struct {
+                       struct i40iw_sc_cqp *cqp;
+                       struct i40iw_add_arp_cache_entry_info info;
+                       u64 scratch;
+               } add_arp_cache_entry;
+
+               struct {
+                       struct i40iw_sc_cqp *cqp;
+                       u64 scratch;
+                       u8 entry_idx;
+                       u8 ignore_ref_count;
+               } del_local_mac_ipaddr_entry;
+
+               struct {
+                       struct i40iw_sc_cqp *cqp;
+                       u64 scratch;
+                       u16 arp_index;
+               } del_arp_cache_entry;
+
+               struct {
+                       struct i40iw_sc_cqp *cqp;
+                       struct i40iw_manage_vf_pble_info info;
+                       u64 scratch;
+               } manage_vf_pble_bp;
+
+               struct {
+                       struct i40iw_sc_cqp *cqp;
+                       struct i40iw_cqp_manage_push_page_info info;
+                       u64 scratch;
+               } manage_push_page;
+
+               struct {
+                       struct i40iw_sc_dev *dev;
+                       struct i40iw_upload_context_info info;
+                       u64 scratch;
+               } qp_upload_context;
+
+               struct {
+                       struct i40iw_sc_cqp *cqp;
+                       u64 scratch;
+               } alloc_local_mac_ipaddr_entry;
+
+               struct {
+                       struct i40iw_sc_dev *dev;
+                       struct i40iw_hmc_fcn_info info;
+                       u64 scratch;
+               } manage_hmc_pm;
+
+               struct {
+                       struct i40iw_sc_ceq *ceq;
+                       u64 scratch;
+               } ceq_create;
+
+               struct {
+                       struct i40iw_sc_ceq *ceq;
+                       u64 scratch;
+               } ceq_destroy;
+
+               struct {
+                       struct i40iw_sc_aeq *aeq;
+                       u64 scratch;
+               } aeq_create;
+
+               struct {
+                       struct i40iw_sc_aeq *aeq;
+                       u64 scratch;
+               } aeq_destroy;
+
+               struct {
+                       struct i40iw_sc_qp *qp;
+                       struct i40iw_qp_flush_info info;
+                       u64 scratch;
+               } qp_flush_wqes;
+
+               struct {
+                       struct i40iw_sc_cqp *cqp;
+                       void *fpm_values_va;
+                       u64 fpm_values_pa;
+                       u8 hmc_fn_id;
+                       u64 scratch;
+               } query_fpm_values;
+
+               struct {
+                       struct i40iw_sc_cqp *cqp;
+                       void *fpm_values_va;
+                       u64 fpm_values_pa;
+                       u8 hmc_fn_id;
+                       u64 scratch;
+               } commit_fpm_values;
+
+               struct {
+                       struct i40iw_sc_cqp *cqp;
+                       struct i40iw_apbvt_info info;
+                       u64 scratch;
+               } manage_apbvt_entry;
+
+               struct {
+                       struct i40iw_sc_cqp *cqp;
+                       struct i40iw_qhash_table_info info;
+                       u64 scratch;
+               } manage_qhash_table_entry;
+
+               struct {
+                       struct i40iw_sc_dev *dev;
+                       struct i40iw_update_sds_info info;
+                       u64 scratch;
+               } update_pe_sds;
+
+               struct {
+                       struct i40iw_sc_cqp *cqp;
+                       struct i40iw_sc_qp *qp;
+                       u64 scratch;
+               } suspend_resume;
+       } u;
+};
+
+struct cqp_commands_info {
+       struct list_head cqp_cmd_entry;
+       u8 cqp_cmd;
+       u8 post_sq;
+       struct cqp_info in;
+};
+
+struct i40iw_virtchnl_work_info {
+       void (*callback_fcn)(void *vf_dev);
+       void *worker_vf_dev;
+};
+
+#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ucontext.h b/drivers/infiniband/hw/i40iw/i40iw_ucontext.h
new file mode 100644 (file)
index 0000000..12acd68
--- /dev/null
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2006 - 2016 Intel Corporation.  All rights reserved.
+ * Copyright (c) 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Cisco Systems.  All rights reserved.
+ * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef I40IW_USER_CONTEXT_H
+#define I40IW_USER_CONTEXT_H
+
+#include <linux/types.h>
+
+#define I40IW_ABI_USERSPACE_VER 4
+#define I40IW_ABI_KERNEL_VER    4
+struct i40iw_alloc_ucontext_req {
+       __u32 reserved32;
+       __u8 userspace_ver;
+       __u8 reserved8[3];
+};
+
+struct i40iw_alloc_ucontext_resp {
+       __u32 max_pds;          /* maximum pds allowed for this user process */
+       __u32 max_qps;          /* maximum qps allowed for this user process */
+       __u32 wq_size;          /* size of the WQs (sq+rq) allocated to the mmapped area */
+       __u8 kernel_ver;
+       __u8 reserved[3];
+};
+
+struct i40iw_alloc_pd_resp {
+       __u32 pd_id;
+       __u8 reserved[4];
+};
+
+struct i40iw_create_cq_req {
+       __u64 user_cq_buffer;
+       __u64 user_shadow_area;
+};
+
+struct i40iw_create_qp_req {
+       __u64 user_wqe_buffers;
+       __u64 user_compl_ctx;
+
+       /* UDA QP PHB */
+       __u64 user_sq_phb;      /* place for VA of the sq phb buff */
+       __u64 user_rq_phb;      /* place for VA of the rq phb buff */
+};
+
+enum i40iw_memreg_type {
+       IW_MEMREG_TYPE_MEM = 0x0000,
+       IW_MEMREG_TYPE_QP = 0x0001,
+       IW_MEMREG_TYPE_CQ = 0x0002,
+};
+
+struct i40iw_mem_reg_req {
+       __u16 reg_type;         /* Memory, QP or CQ */
+       __u16 cq_pages;
+       __u16 rq_pages;
+       __u16 sq_pages;
+};
+
+struct i40iw_create_cq_resp {
+       __u32 cq_id;
+       __u32 cq_size;
+       __u32 mmap_db_index;
+       __u32 reserved;
+};
+
+struct i40iw_create_qp_resp {
+       __u32 qp_id;
+       __u32 actual_sq_size;
+       __u32 actual_rq_size;
+       __u32 i40iw_drv_opt;
+       __u16 push_idx;
+       __u8  lsmm;
+       __u8  rsvd2;
+};
+
+#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_uk.c b/drivers/infiniband/hw/i40iw/i40iw_uk.c
new file mode 100644 (file)
index 0000000..f78c3dc
--- /dev/null
@@ -0,0 +1,1204 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses.  You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+*   Redistribution and use in source and binary forms, with or
+*   without modification, are permitted provided that the following
+*   conditions are met:
+*
+*    - Redistributions of source code must retain the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer.
+*
+*    - Redistributions in binary form must reproduce the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer in the documentation and/or other materials
+*      provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#include "i40iw_osdep.h"
+#include "i40iw_status.h"
+#include "i40iw_d.h"
+#include "i40iw_user.h"
+#include "i40iw_register.h"
+
+static u32 nop_signature = 0x55550000;
+
+/**
+ * i40iw_nop_1 - insert a nop wqe and move head; does not post to hardware
+ * @qp: hw qp ptr
+ */
+static enum i40iw_status_code i40iw_nop_1(struct i40iw_qp_uk *qp)
+{
+       u64 header, *wqe;
+       u64 *wqe_0 = NULL;
+       u32 wqe_idx, peek_head;
+       bool signaled = false;
+
+       if (!qp->sq_ring.head)
+               return I40IW_ERR_PARAM;
+
+       wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
+       wqe = qp->sq_base[wqe_idx].elem;
+       peek_head = (qp->sq_ring.head + 1) % qp->sq_ring.size;
+       wqe_0 = qp->sq_base[peek_head].elem;
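+       /* pre-write the next slot's valid bit with the inverse polarity so it
+        * reads as not yet valid; when the ring wraps (peek_head == 0) the
+        * polarity flips, hence the swapped value
+        */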
+       if (peek_head)
+               wqe_0[3] = LS_64(!qp->swqe_polarity, I40IWQPSQ_VALID);
+       else
+               wqe_0[3] = LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);
+
+       set_64bit_val(wqe, 0, 0);
+       set_64bit_val(wqe, 8, 0);
+       set_64bit_val(wqe, 16, 0);
+
+       header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
+           LS_64(signaled, I40IWQPSQ_SIGCOMPL) |
+           LS_64(qp->swqe_polarity, I40IWQPSQ_VALID) | nop_signature++;
+
+       wmb();  /* Memory barrier to ensure data is written before valid bit is set */
+
+       set_64bit_val(wqe, 24, header);
+       return 0;
+}
+
+/**
+ * i40iw_qp_post_wr - post wr to hardware
+ * @qp: hw qp ptr
+ */
+void i40iw_qp_post_wr(struct i40iw_qp_uk *qp)
+{
+       u64 temp;
+       u32 hw_sq_tail;
+       u32 sw_sq_head;
+
+       mb(); /* valid bit is written and loads completed before reading shadow */
+
+       /* read the doorbell shadow area */
+       get_64bit_val(qp->shadow_area, 0, &temp);
+
+       hw_sq_tail = (u32)RS_64(temp, I40IW_QP_DBSA_HW_SQ_TAIL);
+       sw_sq_head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
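+       /* ring the doorbell only if the hardware tail has not already caught
+        * up with the newly posted head; the two branches handle the software
+        * head wrapping past the end of the ring
+        */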
+       if (sw_sq_head != hw_sq_tail) {
+               if (sw_sq_head > qp->initial_ring.head) {
+                       if ((hw_sq_tail >= qp->initial_ring.head) &&
+                           (hw_sq_tail < sw_sq_head)) {
+                               writel(qp->qp_id, qp->wqe_alloc_reg);
+                       }
+               } else if (sw_sq_head != qp->initial_ring.head) {
+                       if ((hw_sq_tail >= qp->initial_ring.head) ||
+                           (hw_sq_tail < sw_sq_head)) {
+                               writel(qp->qp_id, qp->wqe_alloc_reg);
+                       }
+               }
+       }
+
+       qp->initial_ring.head = qp->sq_ring.head;
+}
+
+/**
+ * i40iw_qp_ring_push_db -  ring qp doorbell
+ * @qp: hw qp ptr
+ * @wqe_idx: wqe index
+ */
+static void i40iw_qp_ring_push_db(struct i40iw_qp_uk *qp, u32 wqe_idx)
+{
+       set_32bit_val(qp->push_db, 0, LS_32((wqe_idx >> 2), I40E_PFPE_WQEALLOC_WQE_DESC_INDEX) | qp->qp_id);
+       qp->initial_ring.head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
+}
+
+/**
+ * i40iw_qp_get_next_send_wqe - return next wqe ptr
+ * @qp: hw qp ptr
+ * @wqe_idx: return wqe index
+ * @wqe_size: size of sq wqe
+ */
+u64 *i40iw_qp_get_next_send_wqe(struct i40iw_qp_uk *qp,
+                               u32 *wqe_idx,
+                               u8 wqe_size)
+{
+       u64 *wqe = NULL;
+       u64 wqe_ptr;
+       u32 peek_head = 0;
+       u16 offset;
+       enum i40iw_status_code ret_code = 0;
+       u8 nop_wqe_cnt = 0, i;
+       u64 *wqe_0 = NULL;
+
+       *wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
+
+       if (!*wqe_idx)
+               qp->swqe_polarity = !qp->swqe_polarity;
+       wqe_ptr = (uintptr_t)qp->sq_base[*wqe_idx].elem;
+       offset = (u16)(wqe_ptr) & 0x7F;
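+       /* if this WQE would spill past the current 128-byte aligned region,
+        * pad the remainder of the region with nop WQEs first
+        */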
+       if ((offset + wqe_size) > I40IW_QP_WQE_MAX_SIZE) {
+               nop_wqe_cnt = (u8)(I40IW_QP_WQE_MAX_SIZE - offset) / I40IW_QP_WQE_MIN_SIZE;
+               for (i = 0; i < nop_wqe_cnt; i++) {
+                       i40iw_nop_1(qp);
+                       I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
+                       if (ret_code)
+                               return NULL;
+               }
+
+               *wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
+               if (!*wqe_idx)
+                       qp->swqe_polarity = !qp->swqe_polarity;
+       }
+       for (i = 0; i < wqe_size / I40IW_QP_WQE_MIN_SIZE; i++) {
+               I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
+               if (ret_code)
+                       return NULL;
+       }
+
+       wqe = qp->sq_base[*wqe_idx].elem;
+
+       peek_head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring);
+       wqe_0 = qp->sq_base[peek_head].elem;
+       if (peek_head & 0x3)
+               wqe_0[3] = LS_64(!qp->swqe_polarity, I40IWQPSQ_VALID);
+       return wqe;
+}
+
+/**
+ * i40iw_set_fragment - set fragment in wqe
+ * @wqe: wqe for setting fragment
+ * @offset: offset value
+ * @sge: sge length and stag
+ */
+static void i40iw_set_fragment(u64 *wqe, u32 offset, struct i40iw_sge *sge)
+{
+       if (sge) {
+               set_64bit_val(wqe, offset, LS_64(sge->tag_off, I40IWQPSQ_FRAG_TO));
+               set_64bit_val(wqe, (offset + 8),
+                             (LS_64(sge->len, I40IWQPSQ_FRAG_LEN) |
+                              LS_64(sge->stag, I40IWQPSQ_FRAG_STAG)));
+       }
+}
+
+/**
+ * i40iw_qp_get_next_recv_wqe - get the qp's next rcv wqe
+ * @qp: hw qp ptr
+ * @wqe_idx: return wqe index
+ */
+u64 *i40iw_qp_get_next_recv_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx)
+{
+       u64 *wqe = NULL;
+       enum i40iw_status_code ret_code;
+
+       if (I40IW_RING_FULL_ERR(qp->rq_ring))
+               return NULL;
+
+       I40IW_ATOMIC_RING_MOVE_HEAD(qp->rq_ring, *wqe_idx, ret_code);
+       if (ret_code)
+               return NULL;
+       if (!*wqe_idx)
+               qp->rwqe_polarity = !qp->rwqe_polarity;
+       /* rq_wqe_size_multiplier is the number of qwords in one rq wqe */
+       wqe = qp->rq_base[*wqe_idx * (qp->rq_wqe_size_multiplier >> 2)].elem;
+
+       return wqe;
+}
+
+/**
+ * i40iw_rdma_write - rdma write operation
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @post_sq: flag to post sq
+ */
+static enum i40iw_status_code i40iw_rdma_write(struct i40iw_qp_uk *qp,
+                                              struct i40iw_post_sq_info *info,
+                                              bool post_sq)
+{
+       u64 header;
+       u64 *wqe;
+       struct i40iw_rdma_write *op_info;
+       u32 i, wqe_idx;
+       u32 total_size = 0, byte_off;
+       enum i40iw_status_code ret_code;
+       bool read_fence = false;
+       u8 wqe_size;
+
+       op_info = &info->op.rdma_write;
+       if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
+               return I40IW_ERR_INVALID_FRAG_COUNT;
+
+       for (i = 0; i < op_info->num_lo_sges; i++)
+               total_size += op_info->lo_sg_list[i].len;
+
+       if (total_size > I40IW_MAX_OUTBOUND_MESSAGE_SIZE)
+               return I40IW_ERR_QP_INVALID_MSG_SIZE;
+
+       read_fence |= info->read_fence;
+
+       ret_code = i40iw_fragcnt_to_wqesize_sq(op_info->num_lo_sges, &wqe_size);
+       if (ret_code)
+               return ret_code;
+
+       wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size);
+       if (!wqe)
+               return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
+
+       qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id;
+       qp->sq_wrtrk_array[wqe_idx].wr_len = total_size;
+       set_64bit_val(wqe, 16,
+                     LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO));
+       if (!op_info->rem_addr.stag)
+               return I40IW_ERR_BAD_STAG;
+
+       header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) |
+                LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) |
+                LS_64((op_info->num_lo_sges > 1 ?  (op_info->num_lo_sges - 1) : 0), I40IWQPSQ_ADDFRAGCNT) |
+                LS_64(read_fence, I40IWQPSQ_READFENCE) |
+                LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
+                LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
+                LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);
+
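+       /* the first SGE occupies bytes 0-15 of the WQE; additional fragments
+        * are packed 16 bytes apart starting at byte offset 32
+        */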
+       i40iw_set_fragment(wqe, 0, op_info->lo_sg_list);
+
+       for (i = 1; i < op_info->num_lo_sges; i++) {
+               byte_off = 32 + (i - 1) * 16;
+               i40iw_set_fragment(wqe, byte_off, &op_info->lo_sg_list[i]);
+       }
+
+       wmb(); /* make sure WQE is populated before valid bit is set */
+
+       set_64bit_val(wqe, 24, header);
+
+       if (post_sq)
+               i40iw_qp_post_wr(qp);
+
+       return 0;
+}
+
+/**
+ * i40iw_rdma_read - rdma read command
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @inv_stag: flag for inv_stag
+ * @post_sq: flag to post sq
+ */
+static enum i40iw_status_code i40iw_rdma_read(struct i40iw_qp_uk *qp,
+                                             struct i40iw_post_sq_info *info,
+                                             bool inv_stag,
+                                             bool post_sq)
+{
+       u64 *wqe;
+       struct i40iw_rdma_read *op_info;
+       u64 header;
+       u32 wqe_idx;
+       enum i40iw_status_code ret_code;
+       u8 wqe_size;
+       bool local_fence = false;
+
+       op_info = &info->op.rdma_read;
+       ret_code = i40iw_fragcnt_to_wqesize_sq(1, &wqe_size);
+       if (ret_code)
+               return ret_code;
+       wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size);
+       if (!wqe)
+               return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
+
+       qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id;
+       qp->sq_wrtrk_array[wqe_idx].wr_len = op_info->lo_addr.len;
+       local_fence |= info->local_fence;
+
+       set_64bit_val(wqe, 16, LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO));
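+       /* inv_stag selects the rdma-read-with-local-invalidate opcode instead
+        * of the plain rdma-read opcode
+        */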
+       header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) |
+                LS_64((inv_stag ? I40IWQP_OP_RDMA_READ_LOC_INV : I40IWQP_OP_RDMA_READ), I40IWQPSQ_OPCODE) |
+                LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
+                LS_64(local_fence, I40IWQPSQ_LOCALFENCE) |
+                LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
+                LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);
+
+       i40iw_set_fragment(wqe, 0, &op_info->lo_addr);
+
+       wmb(); /* make sure WQE is populated before valid bit is set */
+
+       set_64bit_val(wqe, 24, header);
+       if (post_sq)
+               i40iw_qp_post_wr(qp);
+
+       return 0;
+}
+
+/**
+ * i40iw_send - rdma send command
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @stag_to_inv: stag_to_inv value
+ * @post_sq: flag to post sq
+ */
+static enum i40iw_status_code i40iw_send(struct i40iw_qp_uk *qp,
+                                        struct i40iw_post_sq_info *info,
+                                        u32 stag_to_inv,
+                                        bool post_sq)
+{
+       u64 *wqe;
+       struct i40iw_post_send *op_info;
+       u64 header;
+       u32 i, wqe_idx, total_size = 0, byte_off;
+       enum i40iw_status_code ret_code;
+       bool read_fence = false;
+       u8 wqe_size;
+
+       op_info = &info->op.send;
+       if (qp->max_sq_frag_cnt < op_info->num_sges)
+               return I40IW_ERR_INVALID_FRAG_COUNT;
+
+       for (i = 0; i < op_info->num_sges; i++)
+               total_size += op_info->sg_list[i].len;
+       ret_code = i40iw_fragcnt_to_wqesize_sq(op_info->num_sges, &wqe_size);
+       if (ret_code)
+               return ret_code;
+
+       wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size);
+       if (!wqe)
+               return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
+
+       read_fence |= info->read_fence;
+       qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id;
+       qp->sq_wrtrk_array[wqe_idx].wr_len = total_size;
+       set_64bit_val(wqe, 16, 0);
+       header = LS_64(stag_to_inv, I40IWQPSQ_REMSTAG) |
+                LS_64(info->op_type, I40IWQPSQ_OPCODE) |
+                LS_64((op_info->num_sges > 1 ? (op_info->num_sges - 1) : 0),
+                      I40IWQPSQ_ADDFRAGCNT) |
+                LS_64(read_fence, I40IWQPSQ_READFENCE) |
+                LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
+                LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
+                LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);
+
+       i40iw_set_fragment(wqe, 0, op_info->sg_list);
+
+       for (i = 1; i < op_info->num_sges; i++) {
+               byte_off = 32 + (i - 1) * 16;
+               i40iw_set_fragment(wqe, byte_off, &op_info->sg_list[i]);
+       }
+
+       wmb(); /* make sure WQE is populated before valid bit is set */
+
+       set_64bit_val(wqe, 24, header);
+       if (post_sq)
+               i40iw_qp_post_wr(qp);
+
+       return 0;
+}
+
+/**
+ * i40iw_inline_rdma_write - inline rdma write operation
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @post_sq: flag to post sq
+ */
+static enum i40iw_status_code i40iw_inline_rdma_write(struct i40iw_qp_uk *qp,
+                                                     struct i40iw_post_sq_info *info,
+                                                     bool post_sq)
+{
+       u64 *wqe;
+       u8 *dest, *src;
+       struct i40iw_inline_rdma_write *op_info;
+       u64 *push;
+       u64 header = 0;
+       u32 i, wqe_idx;
+       enum i40iw_status_code ret_code;
+       bool read_fence = false;
+       u8 wqe_size;
+
+       op_info = &info->op.inline_rdma_write;
+       if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)
+               return I40IW_ERR_INVALID_IMM_DATA_SIZE;
+
+       ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);
+       if (ret_code)
+               return ret_code;
+
+       wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size);
+       if (!wqe)
+               return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
+
+       read_fence |= info->read_fence;
+       qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id;
+       qp->sq_wrtrk_array[wqe_idx].wr_len = op_info->len;
+       set_64bit_val(wqe, 16,
+                     LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO));
+
+       header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) |
+                LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) |
+                LS_64(op_info->len, I40IWQPSQ_INLINEDATALEN) |
+                LS_64(1, I40IWQPSQ_INLINEDATAFLAG) |
+                LS_64((qp->push_db ? 1 : 0), I40IWQPSQ_PUSHWQE) |
+                LS_64(read_fence, I40IWQPSQ_READFENCE) |
+                LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
+                LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
+                LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);
+
+       dest = (u8 *)wqe;
+       src = (u8 *)(op_info->data);
+
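+       /* inline data layout: the first 16 data bytes occupy bytes 0-15 of the
+        * WQE, bytes 16-31 hold the remote address and header, and any
+        * remaining data continues at byte offset 32
+        */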
+       if (op_info->len <= 16) {
+               for (i = 0; i < op_info->len; i++, src++, dest++)
+                       *dest = *src;
+       } else {
+               for (i = 0; i < 16; i++, src++, dest++)
+                       *dest = *src;
+               dest = (u8 *)wqe + 32;
+               for (; i < op_info->len; i++, src++, dest++)
+                       *dest = *src;
+       }
+
+       wmb(); /* make sure WQE is populated before valid bit is set */
+
+       set_64bit_val(wqe, 24, header);
+
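+       /* with a mapped push page, copy the WQE into the push buffer and ring
+        * the push doorbell instead of posting through the wqe_alloc register
+        */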
+       if (qp->push_db) {
+               push = (u64 *)((uintptr_t)qp->push_wqe + (wqe_idx & 0x3) * 0x20);
+               memcpy(push, wqe, (op_info->len > 16) ? op_info->len + 16 : 32);
+               i40iw_qp_ring_push_db(qp, wqe_idx);
+       } else {
+               if (post_sq)
+                       i40iw_qp_post_wr(qp);
+       }
+
+       return 0;
+}
+
+/**
+ * i40iw_inline_send - inline send operation
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @stag_to_inv: remote stag
+ * @post_sq: flag to post sq
+ */
+static enum i40iw_status_code i40iw_inline_send(struct i40iw_qp_uk *qp,
+                                               struct i40iw_post_sq_info *info,
+                                               u32 stag_to_inv,
+                                               bool post_sq)
+{
+       u64 *wqe;
+       u8 *dest, *src;
+       struct i40iw_post_inline_send *op_info;
+       u64 header;
+       u32 wqe_idx, i;
+       enum i40iw_status_code ret_code;
+       bool read_fence = false;
+       u8 wqe_size;
+       u64 *push;
+
+       op_info = &info->op.inline_send;
+       if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE)
+               return I40IW_ERR_INVALID_IMM_DATA_SIZE;
+
+       ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size);
+       if (ret_code)
+               return ret_code;
+
+       wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size);
+       if (!wqe)
+               return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
+
+       read_fence |= info->read_fence;
+
+       qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id;
+       qp->sq_wrtrk_array[wqe_idx].wr_len = op_info->len;
+       header = LS_64(stag_to_inv, I40IWQPSQ_REMSTAG) |
+           LS_64(info->op_type, I40IWQPSQ_OPCODE) |
+           LS_64(op_info->len, I40IWQPSQ_INLINEDATALEN) |
+           LS_64(1, I40IWQPSQ_INLINEDATAFLAG) |
+           LS_64((qp->push_db ? 1 : 0), I40IWQPSQ_PUSHWQE) |
+           LS_64(read_fence, I40IWQPSQ_READFENCE) |
+           LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
+           LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
+           LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);
+
+       dest = (u8 *)wqe;
+       src = (u8 *)(op_info->data);
+
+       if (op_info->len <= 16) {
+               for (i = 0; i < op_info->len; i++, src++, dest++)
+                       *dest = *src;
+       } else {
+               for (i = 0; i < 16; i++, src++, dest++)
+                       *dest = *src;
+               dest = (u8 *)wqe + 32;
+               for (; i < op_info->len; i++, src++, dest++)
+                       *dest = *src;
+       }
+
+       wmb(); /* make sure WQE is populated before valid bit is set */
+
+       set_64bit_val(wqe, 24, header);
+
+       if (qp->push_db) {
+               push = (u64 *)((uintptr_t)qp->push_wqe + (wqe_idx & 0x3) * 0x20);
+               memcpy(push, wqe, (op_info->len > 16) ? op_info->len + 16 : 32);
+               i40iw_qp_ring_push_db(qp, wqe_idx);
+       } else {
+               if (post_sq)
+                       i40iw_qp_post_wr(qp);
+       }
+
+       return 0;
+}
+
+/**
+ * i40iw_stag_local_invalidate - stag invalidate operation
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @post_sq: flag to post sq
+ */
+static enum i40iw_status_code i40iw_stag_local_invalidate(struct i40iw_qp_uk *qp,
+                                                         struct i40iw_post_sq_info *info,
+                                                         bool post_sq)
+{
+       u64 *wqe;
+       struct i40iw_inv_local_stag *op_info;
+       u64 header;
+       u32 wqe_idx;
+       bool local_fence = false;
+
+       op_info = &info->op.inv_local_stag;
+       local_fence = info->local_fence;
+
+       wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE);
+       if (!wqe)
+               return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
+
+       qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id;
+       qp->sq_wrtrk_array[wqe_idx].wr_len = 0;
+       set_64bit_val(wqe, 0, 0);
+       set_64bit_val(wqe, 8,
+                     LS_64(op_info->target_stag, I40IWQPSQ_LOCSTAG));
+       set_64bit_val(wqe, 16, 0);
+       header = LS_64(I40IW_OP_TYPE_INV_STAG, I40IWQPSQ_OPCODE) |
+           LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
+           LS_64(local_fence, I40IWQPSQ_LOCALFENCE) |
+           LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
+           LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);
+
+       wmb(); /* make sure WQE is populated before valid bit is set */
+
+       set_64bit_val(wqe, 24, header);
+
+       if (post_sq)
+               i40iw_qp_post_wr(qp);
+
+       return 0;
+}
+
+/**
+ * i40iw_mw_bind - Memory Window bind operation
+ * @qp: hw qp ptr
+ * @info: post sq information
+ * @post_sq: flag to post sq
+ */
+static enum i40iw_status_code i40iw_mw_bind(struct i40iw_qp_uk *qp,
+                                           struct i40iw_post_sq_info *info,
+                                           bool post_sq)
+{
+       u64 *wqe;
+       struct i40iw_bind_window *op_info;
+       u64 header;
+       u32 wqe_idx;
+       bool local_fence = false;
+
+       op_info = &info->op.bind_window;
+
+       local_fence |= info->local_fence;
+       wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE);
+       if (!wqe)
+               return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
+
+       qp->sq_wrtrk_array[wqe_idx].wrid = info->wr_id;
+       qp->sq_wrtrk_array[wqe_idx].wr_len = 0;
+       set_64bit_val(wqe, 0, (uintptr_t)op_info->va);
+       set_64bit_val(wqe, 8,
+                     LS_64(op_info->mr_stag, I40IWQPSQ_PARENTMRSTAG) |
+                     LS_64(op_info->mw_stag, I40IWQPSQ_MWSTAG));
+       set_64bit_val(wqe, 16, op_info->bind_length);
+       header = LS_64(I40IW_OP_TYPE_BIND_MW, I40IWQPSQ_OPCODE) |
+           LS_64(((op_info->enable_reads << 2) |
+                  (op_info->enable_writes << 3)),
+                 I40IWQPSQ_STAGRIGHTS) |
+           LS_64((op_info->addressing_type == I40IW_ADDR_TYPE_VA_BASED ?  1 : 0),
+                 I40IWQPSQ_VABASEDTO) |
+           LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
+           LS_64(local_fence, I40IWQPSQ_LOCALFENCE) |
+           LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
+           LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);
+
+       wmb(); /* make sure WQE is populated before valid bit is set */
+
+       set_64bit_val(wqe, 24, header);
+
+       if (post_sq)
+               i40iw_qp_post_wr(qp);
+
+       return 0;
+}
+
+/**
+ * i40iw_post_receive - post receive wqe
+ * @qp: hw qp ptr
+ * @info: post rq information
+ */
+static enum i40iw_status_code i40iw_post_receive(struct i40iw_qp_uk *qp,
+                                                struct i40iw_post_rq_info *info)
+{
+       u64 *wqe;
+       u64 header;
+       u32 total_size = 0, wqe_idx, i, byte_off;
+
+       if (qp->max_rq_frag_cnt < info->num_sges)
+               return I40IW_ERR_INVALID_FRAG_COUNT;
+       for (i = 0; i < info->num_sges; i++)
+               total_size += info->sg_list[i].len;
+       wqe = i40iw_qp_get_next_recv_wqe(qp, &wqe_idx);
+       if (!wqe)
+               return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
+
+       qp->rq_wrid_array[wqe_idx] = info->wr_id;
+       set_64bit_val(wqe, 16, 0);
+
+       header = LS_64((info->num_sges > 1 ? (info->num_sges - 1) : 0),
+                      I40IWQPSQ_ADDFRAGCNT) |
+           LS_64(qp->rwqe_polarity, I40IWQPSQ_VALID);
+
+       i40iw_set_fragment(wqe, 0, info->sg_list);
+
+       for (i = 1; i < info->num_sges; i++) {
+               byte_off = 32 + (i - 1) * 16;
+               i40iw_set_fragment(wqe, byte_off, &info->sg_list[i]);
+       }
+
+       wmb(); /* make sure WQE is populated before valid bit is set */
+
+       set_64bit_val(wqe, 24, header);
+
+       return 0;
+}
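
Fragment 0 of a receive WQE is written at byte 0 and each additional fragment occupies 16 bytes starting at byte 32, as the byte_off arithmetic above shows; a quick standalone check of those offsets for a hypothetical four-SGE post:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t i, num_sges = 4;

            printf("sge 0 -> byte offset 0\n");
            for (i = 1; i < num_sges; i++)
                    printf("sge %u -> byte offset %u\n", i, 32 + (i - 1) * 16);
            return 0;
    }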
+
+/**
+ * i40iw_cq_request_notification - cq notification request (doorbell)
+ * @cq: hw cq
+ * @cq_notify: notification type
+ */
+static void i40iw_cq_request_notification(struct i40iw_cq_uk *cq,
+                                         enum i40iw_completion_notify cq_notify)
+{
+       u64 temp_val;
+       u16 sw_cq_sel;
+       u8 arm_next_se = 0;
+       u8 arm_next = 0;
+       u8 arm_seq_num;
+
+       get_64bit_val(cq->shadow_area, 32, &temp_val);
+       arm_seq_num = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_SEQ_NUM);
+       arm_seq_num++;
+
+       sw_cq_sel = (u16)RS_64(temp_val, I40IW_CQ_DBSA_SW_CQ_SELECT);
+       arm_next_se = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_NEXT_SE);
+       arm_next_se |= 1;
+       if (cq_notify == IW_CQ_COMPL_EVENT)
+               arm_next = 1;
+       temp_val = LS_64(arm_seq_num, I40IW_CQ_DBSA_ARM_SEQ_NUM) |
+           LS_64(sw_cq_sel, I40IW_CQ_DBSA_SW_CQ_SELECT) |
+           LS_64(arm_next_se, I40IW_CQ_DBSA_ARM_NEXT_SE) |
+           LS_64(arm_next, I40IW_CQ_DBSA_ARM_NEXT);
+
+       set_64bit_val(cq->shadow_area, 32, temp_val);
+
+       wmb(); /* make sure shadow area arm fields are written before ringing the doorbell */
+
+       writel(cq->cq_id, cq->cqe_alloc_reg);
+}
+
+/**
+ * i40iw_cq_post_entries - update tail in shadow memory
+ * @cq: hw cq
+ * @count: # of entries processed
+ */
+static enum i40iw_status_code i40iw_cq_post_entries(struct i40iw_cq_uk *cq,
+                                                   u8 count)
+{
+       I40IW_RING_MOVE_TAIL_BY_COUNT(cq->cq_ring, count);
+       set_64bit_val(cq->shadow_area, 0,
+                     I40IW_RING_GETCURRENT_HEAD(cq->cq_ring));
+       return 0;
+}
+
+/**
+ * i40iw_cq_poll_completion - get cq completion info
+ * @cq: hw cq
+ * @info: cq poll information returned
+ * @post_cq: update cq tail
+ */
+static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq,
+                                                      struct i40iw_cq_poll_info *info,
+                                                      bool post_cq)
+{
+       u64 comp_ctx, qword0, qword2, qword3, wqe_qword;
+       u64 *cqe, *sw_wqe;
+       struct i40iw_qp_uk *qp;
+       struct i40iw_ring *pring = NULL;
+       u32 wqe_idx, q_type, array_idx = 0;
+       enum i40iw_status_code ret_code = 0;
+       enum i40iw_status_code ret_code2 = 0;
+       bool move_cq_head = true;
+       u8 polarity;
+       u8 addl_frag_cnt, addl_wqes = 0;
+
+       if (cq->avoid_mem_cflct)
+               cqe = (u64 *)I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(cq);
+       else
+               cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(cq);
+
+       get_64bit_val(cqe, 24, &qword3);
+       polarity = (u8)RS_64(qword3, I40IW_CQ_VALID);
+
+       if (polarity != cq->polarity)
+               return I40IW_ERR_QUEUE_EMPTY;
+
+       q_type = (u8)RS_64(qword3, I40IW_CQ_SQ);
+       info->error = (bool)RS_64(qword3, I40IW_CQ_ERROR);
+       info->push_dropped = (bool)RS_64(qword3, I40IWCQ_PSHDROP);
+       if (info->error) {
+               info->comp_status = I40IW_COMPL_STATUS_FLUSHED;
+               info->major_err = (bool)RS_64(qword3, I40IW_CQ_MAJERR);
+               info->minor_err = (bool)RS_64(qword3, I40IW_CQ_MINERR);
+       } else {
+               info->comp_status = I40IW_COMPL_STATUS_SUCCESS;
+       }
+
+       get_64bit_val(cqe, 0, &qword0);
+       get_64bit_val(cqe, 16, &qword2);
+
+       info->tcp_seq_num = (u8)RS_64(qword0, I40IWCQ_TCPSEQNUM);
+
+       info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID);
+
+       get_64bit_val(cqe, 8, &comp_ctx);
+
+       info->solicited_event = (bool)RS_64(qword3, I40IWCQ_SOEVENT);
+       info->is_srq = (bool)RS_64(qword3, I40IWCQ_SRQ);
+
+       qp = (struct i40iw_qp_uk *)(unsigned long)comp_ctx;
+       wqe_idx = (u32)RS_64(qword3, I40IW_CQ_WQEIDX);
+       info->qp_handle = (i40iw_qp_handle)(unsigned long)qp;
+
+       if (q_type == I40IW_CQE_QTYPE_RQ) {
+               array_idx = (wqe_idx * 4) / qp->rq_wqe_size_multiplier;
+               if (info->comp_status == I40IW_COMPL_STATUS_FLUSHED) {
+                       info->wr_id = qp->rq_wrid_array[qp->rq_ring.tail];
+                       array_idx = qp->rq_ring.tail;
+               } else {
+                       info->wr_id = qp->rq_wrid_array[array_idx];
+               }
+
+               info->op_type = I40IW_OP_TYPE_REC;
+               if (qword3 & I40IWCQ_STAG_MASK) {
+                       info->stag_invalid_set = true;
+                       info->inv_stag = (u32)RS_64(qword2, I40IWCQ_INVSTAG);
+               } else {
+                       info->stag_invalid_set = false;
+               }
+               info->bytes_xfered = (u32)RS_64(qword0, I40IWCQ_PAYLDLEN);
+               I40IW_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
+               pring = &qp->rq_ring;
+       } else {
+               if (info->comp_status != I40IW_COMPL_STATUS_FLUSHED) {
+                       info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
+                       info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;
+
+                       info->op_type = (u8)RS_64(qword3, I40IWCQ_OP);
+                       sw_wqe = qp->sq_base[wqe_idx].elem;
+                       get_64bit_val(sw_wqe, 24, &wqe_qword);
+                       addl_frag_cnt =
+                           (u8)RS_64(wqe_qword, I40IWQPSQ_ADDFRAGCNT);
+                       i40iw_fragcnt_to_wqesize_sq(addl_frag_cnt + 1, &addl_wqes);
+
+                       addl_wqes = (addl_wqes / I40IW_QP_WQE_MIN_SIZE);
+                       I40IW_RING_SET_TAIL(qp->sq_ring, (wqe_idx + addl_wqes));
+               } else {
+                       do {
+                               u8 op_type;
+                               u32 tail;
+
+                               tail = qp->sq_ring.tail;
+                               sw_wqe = qp->sq_base[tail].elem;
+                               get_64bit_val(sw_wqe, 24, &wqe_qword);
+                               op_type = (u8)RS_64(wqe_qword, I40IWQPSQ_OPCODE);
+                               info->op_type = op_type;
+                               addl_frag_cnt = (u8)RS_64(wqe_qword, I40IWQPSQ_ADDFRAGCNT);
+                               i40iw_fragcnt_to_wqesize_sq(addl_frag_cnt + 1, &addl_wqes);
+                               addl_wqes = (addl_wqes / I40IW_QP_WQE_MIN_SIZE);
+                               I40IW_RING_SET_TAIL(qp->sq_ring, (tail + addl_wqes));
+                               if (op_type != I40IWQP_OP_NOP) {
+                                       info->wr_id = qp->sq_wrtrk_array[tail].wrid;
+                                       info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len;
+                                       break;
+                               }
+                       } while (1);
+               }
+               pring = &qp->sq_ring;
+       }
+
+       ret_code = 0;
+
+       if (!ret_code &&
+           (info->comp_status == I40IW_COMPL_STATUS_FLUSHED))
+               if (pring && (I40IW_RING_MORE_WORK(*pring)))
+                       move_cq_head = false;
+
+       if (move_cq_head) {
+               I40IW_RING_MOVE_HEAD(cq->cq_ring, ret_code2);
+
+               if (ret_code2 && !ret_code)
+                       ret_code = ret_code2;
+
+               if (I40IW_RING_GETCURRENT_HEAD(cq->cq_ring) == 0)
+                       cq->polarity ^= 1;
+
+               if (post_cq) {
+                       I40IW_RING_MOVE_TAIL(cq->cq_ring);
+                       set_64bit_val(cq->shadow_area, 0,
+                                     I40IW_RING_GETCURRENT_HEAD(cq->cq_ring));
+               }
+       } else {
+               if (info->is_srq)
+                       return ret_code;
+               qword3 &= ~I40IW_CQ_WQEIDX_MASK;
+               qword3 |= LS_64(pring->tail, I40IW_CQ_WQEIDX);
+               set_64bit_val(cqe, 24, qword3);
+       }
+
+       return ret_code;
+}
+
+/**
+ * i40iw_get_wqe_shift - get shift count for maximum wqe size
+ * @wqdepth: depth of wq required.
+ * @sge: maximum number of scatter-gather elements per wqe
+ * @shift: returns the shift needed based on sge
+ *
+ * Shift is used to left-shift the base wqe size based on the number of sges.
+ * If sge == 1, shift = 0 (32-byte wqe); for sge of 2 or 3, shift = 1
+ * (64-byte wqe); otherwise shift = 2 (128-byte wqe).
+ */
+enum i40iw_status_code i40iw_get_wqe_shift(u32 wqdepth, u8 sge, u8 *shift)
+{
+       u32 size;
+
+       *shift = 0;
+       if (sge > 1)
+               *shift = (sge < 4) ? 1 : 2;
+
+       /* wqdepth must be a power of 2 and at least the minimum wq size */
+
+       if ((wqdepth < I40IWQP_SW_MIN_WQSIZE) || (wqdepth & (wqdepth - 1)))
+               return I40IW_ERR_INVALID_SIZE;
+
+       size = wqdepth << *shift;       /* multiple of 32 bytes count */
+       if (size > I40IWQP_SW_MAX_WQSIZE)
+               return I40IW_ERR_INVALID_SIZE;
+       return 0;
+}
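
A standalone sketch of the sge-to-WQE-size mapping this helper encodes, assuming the 32-byte minimum WQE described in the comment:

    #include <stdio.h>
    #include <stdint.h>

    static uint8_t wqe_shift(uint8_t sge)
    {
            if (sge <= 1)
                    return 0;                 /* 32-byte wqe         */
            return (sge < 4) ? 1 : 2;         /* 64- or 128-byte wqe */
    }

    int main(void)
    {
            uint8_t sge;

            for (sge = 1; sge <= 6; sge++)
                    printf("sge=%u -> %u-byte wqe\n", sge, 32u << wqe_shift(sge));
            return 0;
    }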
+
+static struct i40iw_qp_uk_ops iw_qp_uk_ops = {
+       i40iw_qp_post_wr,
+       i40iw_qp_ring_push_db,
+       i40iw_rdma_write,
+       i40iw_rdma_read,
+       i40iw_send,
+       i40iw_inline_rdma_write,
+       i40iw_inline_send,
+       i40iw_stag_local_invalidate,
+       i40iw_mw_bind,
+       i40iw_post_receive,
+       i40iw_nop
+};
+
+static struct i40iw_cq_ops iw_cq_ops = {
+       i40iw_cq_request_notification,
+       i40iw_cq_poll_completion,
+       i40iw_cq_post_entries,
+       i40iw_clean_cq
+};
+
+static struct i40iw_device_uk_ops iw_device_uk_ops = {
+       i40iw_cq_uk_init,
+       i40iw_qp_uk_init,
+};
+
+/**
+ * i40iw_qp_uk_init - initialize shared qp
+ * @qp: hw qp (user and kernel)
+ * @info: qp initialization info
+ *
+ * Initializes the vars used in both user and kernel mode.
+ * The size of each wqe depends on the maximum number of fragments
+ * allowed, so wqe size * number of wqes should equal the amount of
+ * memory allocated for the sq and rq. If an srq is used, rq_base
+ * points to one rq wqe only (not the whole array of wqes).
+ */
+enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,
+                                       struct i40iw_qp_uk_init_info *info)
+{
+       enum i40iw_status_code ret_code = 0;
+       u32 sq_ring_size;
+       u8 sqshift, rqshift;
+
+       if (info->max_sq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)
+               return I40IW_ERR_INVALID_FRAG_COUNT;
+
+       if (info->max_rq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)
+               return I40IW_ERR_INVALID_FRAG_COUNT;
+       ret_code = i40iw_get_wqe_shift(info->sq_size, info->max_sq_frag_cnt, &sqshift);
+       if (ret_code)
+               return ret_code;
+
+       ret_code = i40iw_get_wqe_shift(info->rq_size, info->max_rq_frag_cnt, &rqshift);
+       if (ret_code)
+               return ret_code;
+
+       qp->sq_base = info->sq;
+       qp->rq_base = info->rq;
+       qp->shadow_area = info->shadow_area;
+       qp->sq_wrtrk_array = info->sq_wrtrk_array;
+       qp->rq_wrid_array = info->rq_wrid_array;
+
+       qp->wqe_alloc_reg = info->wqe_alloc_reg;
+       qp->qp_id = info->qp_id;
+
+       qp->sq_size = info->sq_size;
+       qp->push_db = info->push_db;
+       qp->push_wqe = info->push_wqe;
+
+       qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
+       sq_ring_size = qp->sq_size << sqshift;
+
+       I40IW_RING_INIT(qp->sq_ring, sq_ring_size);
+       I40IW_RING_INIT(qp->initial_ring, sq_ring_size);
+       I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code);
+       I40IW_RING_MOVE_TAIL(qp->sq_ring);
+       I40IW_RING_MOVE_HEAD(qp->initial_ring, ret_code);
+       qp->swqe_polarity = 1;
+       qp->swqe_polarity_deferred = 1;
+       qp->rwqe_polarity = 0;
+
+       if (!qp->use_srq) {
+               qp->rq_size = info->rq_size;
+               qp->max_rq_frag_cnt = info->max_rq_frag_cnt;
+               qp->rq_wqe_size = rqshift;
+               I40IW_RING_INIT(qp->rq_ring, qp->rq_size);
+               qp->rq_wqe_size_multiplier = 4 << rqshift;
+       }
+       qp->ops = iw_qp_uk_ops;
+
+       return ret_code;
+}
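
As the comment above notes, the SQ memory must cover sq_size WQEs at the size implied by the fragment count; the ring is tracked in 32-byte quanta, so sq_ring_size = sq_size << sqshift. A small sizing sketch with assumed (not default) values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t sq_size = 128;                 /* requested wqes (assumed)     */
            uint8_t  sqshift = 1;                   /* 2-3 sges -> 64-byte wqes     */
            uint32_t quanta  = sq_size << sqshift;  /* ring size in 32-byte quanta  */

            printf("ring holds %u quanta = %u bytes of sq memory\n",
                   quanta, quanta * 32);
            return 0;
    }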
+
+/**
+ * i40iw_cq_uk_init - initialize shared cq (user and kernel)
+ * @cq: hw cq
+ * @info: hw cq initialization info
+ */
+enum i40iw_status_code i40iw_cq_uk_init(struct i40iw_cq_uk *cq,
+                                       struct i40iw_cq_uk_init_info *info)
+{
+       if ((info->cq_size < I40IW_MIN_CQ_SIZE) ||
+           (info->cq_size > I40IW_MAX_CQ_SIZE))
+               return I40IW_ERR_INVALID_SIZE;
+       cq->cq_base = (struct i40iw_cqe *)info->cq_base;
+       cq->cq_id = info->cq_id;
+       cq->cq_size = info->cq_size;
+       cq->cqe_alloc_reg = info->cqe_alloc_reg;
+       cq->shadow_area = info->shadow_area;
+       cq->avoid_mem_cflct = info->avoid_mem_cflct;
+
+       I40IW_RING_INIT(cq->cq_ring, cq->cq_size);
+       cq->polarity = 1;
+       cq->ops = iw_cq_ops;
+
+       return 0;
+}
+
+/**
+ * i40iw_device_init_uk - setup routines for iwarp shared device
+ * @dev: iwarp shared device (user and kernel)
+ */
+void i40iw_device_init_uk(struct i40iw_dev_uk *dev)
+{
+       dev->ops_uk = iw_device_uk_ops;
+}
+
+/**
+ * i40iw_clean_cq - clean cq entries
+ * @queue: completion context whose entries are invalidated
+ * @cq: cq to clean
+ */
+void i40iw_clean_cq(void *queue, struct i40iw_cq_uk *cq)
+{
+       u64 *cqe;
+       u64 qword3, comp_ctx;
+       u32 cq_head;
+       u8 polarity, temp;
+
+       cq_head = cq->cq_ring.head;
+       temp = cq->polarity;
+       do {
+               if (cq->avoid_mem_cflct)
+                       cqe = (u64 *)&(((struct i40iw_extended_cqe *)cq->cq_base)[cq_head]);
+               else
+                       cqe = (u64 *)&cq->cq_base[cq_head];
+               get_64bit_val(cqe, 24, &qword3);
+               polarity = (u8)RS_64(qword3, I40IW_CQ_VALID);
+
+               if (polarity != temp)
+                       break;
+
+               get_64bit_val(cqe, 8, &comp_ctx);
+               if ((void *)(unsigned long)comp_ctx == queue)
+                       set_64bit_val(cqe, 8, 0);
+
+               cq_head = (cq_head + 1) % cq->cq_ring.size;
+               if (!cq_head)
+                       temp ^= 1;
+       } while (true);
+}
+
+/**
+ * i40iw_nop - send a nop
+ * @qp: hw qp ptr
+ * @wr_id: work request id
+ * @signaled: flag if signaled for completion
+ * @post_sq: flag to post sq
+ */
+enum i40iw_status_code i40iw_nop(struct i40iw_qp_uk *qp,
+                                u64 wr_id,
+                                bool signaled,
+                                bool post_sq)
+{
+       u64 header, *wqe;
+       u32 wqe_idx;
+
+       wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE);
+       if (!wqe)
+               return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
+
+       qp->sq_wrtrk_array[wqe_idx].wrid = wr_id;
+       qp->sq_wrtrk_array[wqe_idx].wr_len = 0;
+       set_64bit_val(wqe, 0, 0);
+       set_64bit_val(wqe, 8, 0);
+       set_64bit_val(wqe, 16, 0);
+
+       header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
+           LS_64(signaled, I40IWQPSQ_SIGCOMPL) |
+           LS_64(qp->swqe_polarity, I40IWQPSQ_VALID);
+
+       wmb(); /* make sure WQE is populated before valid bit is set */
+
+       set_64bit_val(wqe, 24, header);
+       if (post_sq)
+               i40iw_qp_post_wr(qp);
+
+       return 0;
+}
+
+/**
+ * i40iw_fragcnt_to_wqesize_sq - calculate wqe size based on fragment count for SQ
+ * @frag_cnt: number of fragments
+ * @wqe_size: size of sq wqe returned
+ */
+enum i40iw_status_code i40iw_fragcnt_to_wqesize_sq(u8 frag_cnt, u8 *wqe_size)
+{
+       switch (frag_cnt) {
+       case 0:
+       case 1:
+               *wqe_size = I40IW_QP_WQE_MIN_SIZE;
+               break;
+       case 2:
+       case 3:
+               *wqe_size = 64;
+               break;
+       case 4:
+       case 5:
+               *wqe_size = 96;
+               break;
+       case 6:
+       case 7:
+               *wqe_size = 128;
+               break;
+       default:
+               return I40IW_ERR_INVALID_FRAG_COUNT;
+       }
+
+       return 0;
+}
+
+/**
+ * i40iw_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ
+ * @frag_cnt: number of fragments
+ * @wqe_size: size of rq wqe returned
+ */
+enum i40iw_status_code i40iw_fragcnt_to_wqesize_rq(u8 frag_cnt, u8 *wqe_size)
+{
+       switch (frag_cnt) {
+       case 0:
+       case 1:
+               *wqe_size = 32;
+               break;
+       case 2:
+       case 3:
+               *wqe_size = 64;
+               break;
+       case 4:
+       case 5:
+       case 6:
+       case 7:
+               *wqe_size = 128;
+               break;
+       default:
+               return I40IW_ERR_INVALID_FRAG_COUNT;
+       }
+
+       return 0;
+}
+
+/**
+ * i40iw_inline_data_size_to_wqesize - calculate wqe size based on inline data size
+ * @data_size: data size for inline
+ * @wqe_size: size of sq wqe returned
+ */
+enum i40iw_status_code i40iw_inline_data_size_to_wqesize(u32 data_size,
+                                                        u8 *wqe_size)
+{
+       if (data_size > I40IW_MAX_INLINE_DATA_SIZE)
+               return I40IW_ERR_INVALID_IMM_DATA_SIZE;
+
+       if (data_size <= 16)
+               *wqe_size = I40IW_QP_WQE_MIN_SIZE;
+       else if (data_size <= 48)
+               *wqe_size = 64;
+       else if (data_size <= 80)
+               *wqe_size = 96;
+       else
+               *wqe_size = 128;
+
+       return 0;
+}
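
A standalone check of the inline-size-to-WQE-size thresholds used above (the 112-byte limit is I40IW_MAX_INLINE_DATA_SIZE):

    #include <stdio.h>
    #include <stdint.h>

    static uint8_t inline_wqe_size(uint32_t len)
    {
            if (len <= 16)
                    return 32;
            if (len <= 48)
                    return 64;
            if (len <= 80)
                    return 96;
            return 128;     /* valid up to the 112-byte inline maximum */
    }

    int main(void)
    {
            uint32_t sizes[] = { 8, 16, 17, 48, 80, 112 };
            unsigned int i;

            for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                    printf("%3u inline bytes -> %u-byte wqe\n",
                           sizes[i], inline_wqe_size(sizes[i]));
            return 0;
    }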
diff --git a/drivers/infiniband/hw/i40iw/i40iw_user.h b/drivers/infiniband/hw/i40iw/i40iw_user.h
new file mode 100644 (file)
index 0000000..5cd971b
--- /dev/null
@@ -0,0 +1,442 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses.  You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+*   Redistribution and use in source and binary forms, with or
+*   without modification, are permitted provided that the following
+*   conditions are met:
+*
+*    - Redistributions of source code must retain the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer.
+*
+*    - Redistributions in binary form must reproduce the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer in the documentation and/or other materials
+*      provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_USER_H
+#define I40IW_USER_H
+
+enum i40iw_device_capabilities_const {
+       I40IW_WQE_SIZE =                        4,
+       I40IW_CQP_WQE_SIZE =                    8,
+       I40IW_CQE_SIZE =                        4,
+       I40IW_EXTENDED_CQE_SIZE =               8,
+       I40IW_AEQE_SIZE =                       2,
+       I40IW_CEQE_SIZE =                       1,
+       I40IW_CQP_CTX_SIZE =                    8,
+       I40IW_SHADOW_AREA_SIZE =                8,
+       I40IW_CEQ_MAX_COUNT =                   256,
+       I40IW_QUERY_FPM_BUF_SIZE =              128,
+       I40IW_COMMIT_FPM_BUF_SIZE =             128,
+       I40IW_MIN_IW_QP_ID =                    1,
+       I40IW_MAX_IW_QP_ID =                    262143,
+       I40IW_MIN_CEQID =                       0,
+       I40IW_MAX_CEQID =                       256,
+       I40IW_MIN_CQID =                        0,
+       I40IW_MAX_CQID =                        131071,
+       I40IW_MIN_AEQ_ENTRIES =                 1,
+       I40IW_MAX_AEQ_ENTRIES =                 524287,
+       I40IW_MIN_CEQ_ENTRIES =                 1,
+       I40IW_MAX_CEQ_ENTRIES =                 131071,
+       I40IW_MIN_CQ_SIZE =                     1,
+       I40IW_MAX_CQ_SIZE =                     1048575,
+       I40IW_MAX_AEQ_ALLOCATE_COUNT =          255,
+       I40IW_DB_ID_ZERO =                      0,
+       I40IW_MAX_WQ_FRAGMENT_COUNT =           6,
+       I40IW_MAX_SGE_RD =                      1,
+       I40IW_MAX_OUTBOUND_MESSAGE_SIZE =       2147483647,
+       I40IW_MAX_INBOUND_MESSAGE_SIZE =        2147483647,
+       I40IW_MAX_PUSH_PAGE_COUNT =             4096,
+       I40IW_MAX_PE_ENABLED_VF_COUNT =         32,
+       I40IW_MAX_VF_FPM_ID =                   47,
+       I40IW_MAX_VF_PER_PF =                   127,
+       I40IW_MAX_SQ_PAYLOAD_SIZE =             2145386496,
+       I40IW_MAX_INLINE_DATA_SIZE =            112,
+       I40IW_MAX_PUSHMODE_INLINE_DATA_SIZE =   112,
+       I40IW_MAX_IRD_SIZE =                    32,
+       I40IW_QPCTX_ENCD_MAXIRD =               3,
+       I40IW_MAX_WQ_ENTRIES =                  2048,
+       I40IW_MAX_ORD_SIZE =                    32,
+       I40IW_Q2_BUFFER_SIZE =                  (248 + 100),
+       I40IW_QP_CTX_SIZE =                     248
+};
+
+#define i40iw_handle void *
+#define i40iw_adapter_handle i40iw_handle
+#define i40iw_qp_handle i40iw_handle
+#define i40iw_cq_handle i40iw_handle
+#define i40iw_srq_handle i40iw_handle
+#define i40iw_pd_id i40iw_handle
+#define i40iw_stag_handle i40iw_handle
+#define i40iw_stag_index u32
+#define i40iw_stag u32
+#define i40iw_stag_key u8
+
+#define i40iw_tagged_offset u64
+#define i40iw_access_privileges u32
+#define i40iw_physical_fragment u64
+#define i40iw_address_list u64 *
+
+#define I40IW_CREATE_STAG(index, key)       (((index) << 8) + (key))
+
+#define I40IW_STAG_KEY_FROM_STAG(stag)      ((stag) & 0x000000FF)
+
+#define I40IW_STAG_INDEX_FROM_STAG(stag)    (((stag) & 0xFFFFFF00) >> 8)
+
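A quick standalone check of the STag packing these macros describe (24-bit index in the upper bits, 8-bit key in the low byte):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t index = 0x1234, stag;
            uint8_t key = 0xab;

            stag = (index << 8) + key;              /* I40IW_CREATE_STAG         */
            printf("stag=0x%08x key=0x%02x index=0x%06x\n",
                   stag,
                   stag & 0x000000ff,               /* key recovered from stag   */
                   (stag & 0xffffff00) >> 8);       /* index recovered from stag */
            return 0;
    }
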
+struct i40iw_qp_uk;
+struct i40iw_cq_uk;
+struct i40iw_srq_uk;
+struct i40iw_qp_uk_init_info;
+struct i40iw_cq_uk_init_info;
+struct i40iw_srq_uk_init_info;
+
+struct i40iw_sge {
+       i40iw_tagged_offset tag_off;
+       u32 len;
+       i40iw_stag stag;
+};
+
+#define i40iw_sgl struct i40iw_sge *
+
+struct i40iw_ring {
+       u32 head;
+       u32 tail;
+       u32 size;
+};
+
+struct i40iw_cqe {
+       u64 buf[I40IW_CQE_SIZE];
+};
+
+struct i40iw_extended_cqe {
+       u64 buf[I40IW_EXTENDED_CQE_SIZE];
+};
+
+struct i40iw_wqe {
+       u64 buf[I40IW_WQE_SIZE];
+};
+
+struct i40iw_qp_uk_ops;
+
+enum i40iw_addressing_type {
+       I40IW_ADDR_TYPE_ZERO_BASED = 0,
+       I40IW_ADDR_TYPE_VA_BASED = 1,
+};
+
+#define I40IW_ACCESS_FLAGS_LOCALREAD           0x01
+#define I40IW_ACCESS_FLAGS_LOCALWRITE          0x02
+#define I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY     0x04
+#define I40IW_ACCESS_FLAGS_REMOTEREAD          0x05
+#define I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY    0x08
+#define I40IW_ACCESS_FLAGS_REMOTEWRITE         0x0a
+#define I40IW_ACCESS_FLAGS_BIND_WINDOW         0x10
+#define I40IW_ACCESS_FLAGS_ALL                 0x1F
+
+#define I40IW_OP_TYPE_RDMA_WRITE       0
+#define I40IW_OP_TYPE_RDMA_READ                1
+#define I40IW_OP_TYPE_SEND             3
+#define I40IW_OP_TYPE_SEND_INV         4
+#define I40IW_OP_TYPE_SEND_SOL         5
+#define I40IW_OP_TYPE_SEND_SOL_INV     6
+#define I40IW_OP_TYPE_REC              7
+#define I40IW_OP_TYPE_BIND_MW          8
+#define I40IW_OP_TYPE_FAST_REG_NSMR    9
+#define I40IW_OP_TYPE_INV_STAG         10
+#define I40IW_OP_TYPE_RDMA_READ_INV_STAG 11
+#define I40IW_OP_TYPE_NOP              12
+
+enum i40iw_completion_status {
+       I40IW_COMPL_STATUS_SUCCESS = 0,
+       I40IW_COMPL_STATUS_FLUSHED,
+       I40IW_COMPL_STATUS_INVALID_WQE,
+       I40IW_COMPL_STATUS_QP_CATASTROPHIC,
+       I40IW_COMPL_STATUS_REMOTE_TERMINATION,
+       I40IW_COMPL_STATUS_INVALID_STAG,
+       I40IW_COMPL_STATUS_BASE_BOUND_VIOLATION,
+       I40IW_COMPL_STATUS_ACCESS_VIOLATION,
+       I40IW_COMPL_STATUS_INVALID_PD_ID,
+       I40IW_COMPL_STATUS_WRAP_ERROR,
+       I40IW_COMPL_STATUS_STAG_INVALID_PDID,
+       I40IW_COMPL_STATUS_RDMA_READ_ZERO_ORD,
+       I40IW_COMPL_STATUS_QP_NOT_PRIVLEDGED,
+       I40IW_COMPL_STATUS_STAG_NOT_INVALID,
+       I40IW_COMPL_STATUS_INVALID_PHYS_BUFFER_SIZE,
+       I40IW_COMPL_STATUS_INVALID_PHYS_BUFFER_ENTRY,
+       I40IW_COMPL_STATUS_INVALID_FBO,
+       I40IW_COMPL_STATUS_INVALID_LENGTH,
+       I40IW_COMPL_STATUS_INVALID_ACCESS,
+       I40IW_COMPL_STATUS_PHYS_BUFFER_LIST_TOO_LONG,
+       I40IW_COMPL_STATUS_INVALID_VIRT_ADDRESS,
+       I40IW_COMPL_STATUS_INVALID_REGION,
+       I40IW_COMPL_STATUS_INVALID_WINDOW,
+       I40IW_COMPL_STATUS_INVALID_TOTAL_LENGTH
+};
+
+enum i40iw_completion_notify {
+       IW_CQ_COMPL_EVENT = 0,
+       IW_CQ_COMPL_SOLICITED = 1
+};
+
+struct i40iw_post_send {
+       i40iw_sgl sg_list;
+       u8 num_sges;
+};
+
+struct i40iw_post_inline_send {
+       void *data;
+       u32 len;
+};
+
+struct i40iw_post_send_w_inv {
+       i40iw_sgl sg_list;
+       u32 num_sges;
+       i40iw_stag remote_stag_to_inv;
+};
+
+struct i40iw_post_inline_send_w_inv {
+       void *data;
+       u32 len;
+       i40iw_stag remote_stag_to_inv;
+};
+
+struct i40iw_rdma_write {
+       i40iw_sgl lo_sg_list;
+       u8 num_lo_sges;
+       struct i40iw_sge rem_addr;
+};
+
+struct i40iw_inline_rdma_write {
+       void *data;
+       u32 len;
+       struct i40iw_sge rem_addr;
+};
+
+struct i40iw_rdma_read {
+       struct i40iw_sge lo_addr;
+       struct i40iw_sge rem_addr;
+};
+
+struct i40iw_bind_window {
+       i40iw_stag mr_stag;
+       u64 bind_length;
+       void *va;
+       enum i40iw_addressing_type addressing_type;
+       bool enable_reads;
+       bool enable_writes;
+       i40iw_stag mw_stag;
+};
+
+struct i40iw_inv_local_stag {
+       i40iw_stag target_stag;
+};
+
+struct i40iw_post_sq_info {
+       u64 wr_id;
+       u8 op_type;
+       bool signaled;
+       bool read_fence;
+       bool local_fence;
+       bool inline_data;
+       bool defer_flag;
+       union {
+               struct i40iw_post_send send;
+               struct i40iw_post_send send_w_sol;
+               struct i40iw_post_send_w_inv send_w_inv;
+               struct i40iw_post_send_w_inv send_w_sol_inv;
+               struct i40iw_rdma_write rdma_write;
+               struct i40iw_rdma_read rdma_read;
+               struct i40iw_rdma_read rdma_read_inv;
+               struct i40iw_bind_window bind_window;
+               struct i40iw_inv_local_stag inv_local_stag;
+               struct i40iw_inline_rdma_write inline_rdma_write;
+               struct i40iw_post_inline_send inline_send;
+               struct i40iw_post_inline_send inline_send_w_sol;
+               struct i40iw_post_inline_send_w_inv inline_send_w_inv;
+               struct i40iw_post_inline_send_w_inv inline_send_w_sol_inv;
+       } op;
+};
+
+struct i40iw_post_rq_info {
+       u64 wr_id;
+       i40iw_sgl sg_list;
+       u32 num_sges;
+};
+
+struct i40iw_cq_poll_info {
+       u64 wr_id;
+       i40iw_qp_handle qp_handle;
+       u32 bytes_xfered;
+       u32 tcp_seq_num;
+       u32 qp_id;
+       i40iw_stag inv_stag;
+       enum i40iw_completion_status comp_status;
+       u16 major_err;
+       u16 minor_err;
+       u8 op_type;
+       bool stag_invalid_set;
+       bool push_dropped;
+       bool error;
+       bool is_srq;
+       bool solicited_event;
+};
+
+struct i40iw_qp_uk_ops {
+       void (*iw_qp_post_wr)(struct i40iw_qp_uk *);
+       void (*iw_qp_ring_push_db)(struct i40iw_qp_uk *, u32);
+       enum i40iw_status_code (*iw_rdma_write)(struct i40iw_qp_uk *,
+                                               struct i40iw_post_sq_info *, bool);
+       enum i40iw_status_code (*iw_rdma_read)(struct i40iw_qp_uk *,
+                                              struct i40iw_post_sq_info *, bool, bool);
+       enum i40iw_status_code (*iw_send)(struct i40iw_qp_uk *,
+                                         struct i40iw_post_sq_info *, u32, bool);
+       enum i40iw_status_code (*iw_inline_rdma_write)(struct i40iw_qp_uk *,
+                                                      struct i40iw_post_sq_info *, bool);
+       enum i40iw_status_code (*iw_inline_send)(struct i40iw_qp_uk *,
+                                                struct i40iw_post_sq_info *, u32, bool);
+       enum i40iw_status_code (*iw_stag_local_invalidate)(struct i40iw_qp_uk *,
+                                                          struct i40iw_post_sq_info *, bool);
+       enum i40iw_status_code (*iw_mw_bind)(struct i40iw_qp_uk *,
+                                            struct i40iw_post_sq_info *, bool);
+       enum i40iw_status_code (*iw_post_receive)(struct i40iw_qp_uk *,
+                                                 struct i40iw_post_rq_info *);
+       enum i40iw_status_code (*iw_post_nop)(struct i40iw_qp_uk *, u64, bool, bool);
+};
+
+struct i40iw_cq_ops {
+       void (*iw_cq_request_notification)(struct i40iw_cq_uk *,
+                                          enum i40iw_completion_notify);
+       enum i40iw_status_code (*iw_cq_poll_completion)(struct i40iw_cq_uk *,
+                                                       struct i40iw_cq_poll_info *, bool);
+       enum i40iw_status_code (*iw_cq_post_entries)(struct i40iw_cq_uk *, u8 count);
+       void (*iw_cq_clean)(void *, struct i40iw_cq_uk *);
+};
+
+struct i40iw_dev_uk;
+
+struct i40iw_device_uk_ops {
+       enum i40iw_status_code (*iwarp_cq_uk_init)(struct i40iw_cq_uk *,
+                                                  struct i40iw_cq_uk_init_info *);
+       enum i40iw_status_code (*iwarp_qp_uk_init)(struct i40iw_qp_uk *,
+                                                  struct i40iw_qp_uk_init_info *);
+};
+
+struct i40iw_dev_uk {
+       struct i40iw_device_uk_ops ops_uk;
+};
+
+struct i40iw_sq_uk_wr_trk_info {
+       u64 wrid;
+       u64 wr_len;
+};
+
+struct i40iw_qp_quanta {
+       u64 elem[I40IW_WQE_SIZE];
+};
+
+struct i40iw_qp_uk {
+       struct i40iw_qp_quanta *sq_base;
+       struct i40iw_qp_quanta *rq_base;
+       u32 __iomem *wqe_alloc_reg;
+       struct i40iw_sq_uk_wr_trk_info *sq_wrtrk_array;
+       u64 *rq_wrid_array;
+       u64 *shadow_area;
+       u32 *push_db;
+       u64 *push_wqe;
+       struct i40iw_ring sq_ring;
+       struct i40iw_ring rq_ring;
+       struct i40iw_ring initial_ring;
+       u32 qp_id;
+       u32 sq_size;
+       u32 rq_size;
+       struct i40iw_qp_uk_ops ops;
+       bool use_srq;
+       u8 swqe_polarity;
+       u8 swqe_polarity_deferred;
+       u8 rwqe_polarity;
+       u8 rq_wqe_size;
+       u8 rq_wqe_size_multiplier;
+       u8 max_sq_frag_cnt;
+       u8 max_rq_frag_cnt;
+       bool deferred_flag;
+};
+
+struct i40iw_cq_uk {
+       struct i40iw_cqe *cq_base;
+       u32 __iomem *cqe_alloc_reg;
+       u64 *shadow_area;
+       u32 cq_id;
+       u32 cq_size;
+       struct i40iw_ring cq_ring;
+       u8 polarity;
+       bool avoid_mem_cflct;
+
+       struct i40iw_cq_ops ops;
+};
+
+struct i40iw_qp_uk_init_info {
+       struct i40iw_qp_quanta *sq;
+       struct i40iw_qp_quanta *rq;
+       u32 __iomem *wqe_alloc_reg;
+       u64 *shadow_area;
+       struct i40iw_sq_uk_wr_trk_info *sq_wrtrk_array;
+       u64 *rq_wrid_array;
+       u32 *push_db;
+       u64 *push_wqe;
+       u32 qp_id;
+       u32 sq_size;
+       u32 rq_size;
+       u8 max_sq_frag_cnt;
+       u8 max_rq_frag_cnt;
+
+};
+
+struct i40iw_cq_uk_init_info {
+       u32 __iomem *cqe_alloc_reg;
+       struct i40iw_cqe *cq_base;
+       u64 *shadow_area;
+       u32 cq_size;
+       u32 cq_id;
+       bool avoid_mem_cflct;
+};
+
+void i40iw_device_init_uk(struct i40iw_dev_uk *dev);
+
+void i40iw_qp_post_wr(struct i40iw_qp_uk *qp);
+u64 *i40iw_qp_get_next_send_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx,
+                               u8 wqe_size);
+u64 *i40iw_qp_get_next_recv_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx);
+u64 *i40iw_qp_get_next_srq_wqe(struct i40iw_srq_uk *srq, u32 *wqe_idx);
+
+enum i40iw_status_code i40iw_cq_uk_init(struct i40iw_cq_uk *cq,
+                                       struct i40iw_cq_uk_init_info *info);
+enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp,
+                                       struct i40iw_qp_uk_init_info *info);
+
+void i40iw_clean_cq(void *queue, struct i40iw_cq_uk *cq);
+enum i40iw_status_code i40iw_nop(struct i40iw_qp_uk *qp, u64 wr_id,
+                                bool signaled, bool post_sq);
+enum i40iw_status_code i40iw_fragcnt_to_wqesize_sq(u8 frag_cnt, u8 *wqe_size);
+enum i40iw_status_code i40iw_fragcnt_to_wqesize_rq(u8 frag_cnt, u8 *wqe_size);
+enum i40iw_status_code i40iw_inline_data_size_to_wqesize(u32 data_size,
+                                                        u8 *wqe_size);
+enum i40iw_status_code i40iw_get_wqe_shift(u32 wqdepth, u8 sge, u8 *shift);
+#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
new file mode 100644 (file)
index 0000000..1ceec81
--- /dev/null
@@ -0,0 +1,1270 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses.  You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+*   Redistribution and use in source and binary forms, with or
+*   without modification, are permitted provided that the following
+*   conditions are met:
+*
+*    - Redistributions of source code must retain the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer.
+*
+*    - Redistributions in binary form must reproduce the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer in the documentation and/or other materials
+*      provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/if_vlan.h>
+#include <linux/crc32.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <asm/irq.h>
+#include <asm/byteorder.h>
+#include <net/netevent.h>
+#include <net/neighbour.h>
+#include "i40iw.h"
+
+/**
+ * i40iw_arp_table - manage arp table
+ * @iwdev: iwarp device
+ * @ip_addr: ip address for device
+ * @ipv4: flag indicating IPv4 address when true
+ * @mac_addr: mac address ptr
+ * @action: add, resolve or delete
+ */
+int i40iw_arp_table(struct i40iw_device *iwdev,
+                   __be32 *ip_addr,
+                   bool ipv4,
+                   u8 *mac_addr,
+                   u32 action)
+{
+       int arp_index;
+       int err;
+       u32 ip[4];
+
+       if (ipv4) {
+               memset(ip, 0, sizeof(ip));
+               ip[0] = *ip_addr;
+       } else {
+               memcpy(ip, ip_addr, sizeof(ip));
+       }
+
+       for (arp_index = 0; (u32)arp_index < iwdev->arp_table_size; arp_index++)
+               if (memcmp(iwdev->arp_table[arp_index].ip_addr, ip, sizeof(ip)) == 0)
+                       break;
+       switch (action) {
+       case I40IW_ARP_ADD:
+               if (arp_index != iwdev->arp_table_size)
+                       return -1;
+
+               arp_index = 0;
+               err = i40iw_alloc_resource(iwdev, iwdev->allocated_arps,
+                                          iwdev->arp_table_size,
+                                          (u32 *)&arp_index,
+                                          &iwdev->next_arp_index);
+
+               if (err)
+                       return err;
+
+               memcpy(iwdev->arp_table[arp_index].ip_addr, ip, sizeof(ip));
+               ether_addr_copy(iwdev->arp_table[arp_index].mac_addr, mac_addr);
+               break;
+       case I40IW_ARP_RESOLVE:
+               if (arp_index == iwdev->arp_table_size)
+                       return -1;
+               break;
+       case I40IW_ARP_DELETE:
+               if (arp_index == iwdev->arp_table_size)
+                       return -1;
+               memset(iwdev->arp_table[arp_index].ip_addr, 0,
+                      sizeof(iwdev->arp_table[arp_index].ip_addr));
+               eth_zero_addr(iwdev->arp_table[arp_index].mac_addr);
+               i40iw_free_resource(iwdev, iwdev->allocated_arps, arp_index);
+               break;
+       default:
+               return -1;
+       }
+       return arp_index;
+}
+
+/**
+ * i40iw_wr32 - write 32 bits to hw register
+ * @hw: hardware information including registers
+ * @reg: register offset
+ * @value: value to write to register
+ */
+inline void i40iw_wr32(struct i40iw_hw *hw, u32 reg, u32 value)
+{
+       writel(value, hw->hw_addr + reg);
+}
+
+/**
+ * i40iw_rd32 - read a 32 bit hw register
+ * @hw: hardware information including registers
+ * @reg: register offset
+ *
+ * Return value of register content
+ */
+inline u32 i40iw_rd32(struct i40iw_hw *hw, u32 reg)
+{
+       return readl(hw->hw_addr + reg);
+}
+
+/**
+ * i40iw_inetaddr_event - system notifier for netdev events
+ * @notifier: not used
+ * @event: event for notifier
+ * @ptr: interface address
+ */
+int i40iw_inetaddr_event(struct notifier_block *notifier,
+                        unsigned long event,
+                        void *ptr)
+{
+       struct in_ifaddr *ifa = ptr;
+       struct net_device *event_netdev = ifa->ifa_dev->dev;
+       struct net_device *netdev;
+       struct net_device *upper_dev;
+       struct i40iw_device *iwdev;
+       struct i40iw_handler *hdl;
+       __be32 local_ipaddr;
+
+       hdl = i40iw_find_netdev(event_netdev);
+       if (!hdl)
+               return NOTIFY_DONE;
+
+       iwdev = &hdl->device;
+       netdev = iwdev->ldev->netdev;
+       upper_dev = netdev_master_upper_dev_get(netdev);
+       if (netdev != event_netdev)
+               return NOTIFY_DONE;
+
+       switch (event) {
+       case NETDEV_DOWN:
+               if (upper_dev)
+                       local_ipaddr =
+                               ((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address;
+               else
+                       local_ipaddr = ifa->ifa_address;
+               local_ipaddr = ntohl(local_ipaddr);
+               i40iw_manage_arp_cache(iwdev,
+                                      netdev->dev_addr,
+                                      &local_ipaddr,
+                                      true,
+                                      I40IW_ARP_DELETE);
+               return NOTIFY_OK;
+       case NETDEV_UP:
+               if (upper_dev)
+                       local_ipaddr =
+                               ((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address;
+               else
+                       local_ipaddr = ifa->ifa_address;
+               local_ipaddr = ntohl(local_ipaddr);
+               i40iw_manage_arp_cache(iwdev,
+                                      netdev->dev_addr,
+                                      &local_ipaddr,
+                                      true,
+                                      I40IW_ARP_ADD);
+               break;
+       case NETDEV_CHANGEADDR:
+               /* Add the address to the IP table */
+               if (upper_dev)
+                       local_ipaddr =
+                               ((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address;
+               else
+                       local_ipaddr = ifa->ifa_address;
+
+               local_ipaddr = ntohl(local_ipaddr);
+               i40iw_manage_arp_cache(iwdev,
+                                      netdev->dev_addr,
+                                      &local_ipaddr,
+                                      true,
+                                      I40IW_ARP_ADD);
+               break;
+       default:
+               break;
+       }
+       return NOTIFY_DONE;
+}
+
+/**
+ * i40iw_inet6addr_event - system notifier for ipv6 netdev events
+ * @notifier: not used
+ * @event: event for notifier
+ * @ptr: interface address
+ */
+int i40iw_inet6addr_event(struct notifier_block *notifier,
+                         unsigned long event,
+                         void *ptr)
+{
+       struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
+       struct net_device *event_netdev = ifa->idev->dev;
+       struct net_device *netdev;
+       struct i40iw_device *iwdev;
+       struct i40iw_handler *hdl;
+       __be32 local_ipaddr6[4];
+
+       hdl = i40iw_find_netdev(event_netdev);
+       if (!hdl)
+               return NOTIFY_DONE;
+
+       iwdev = &hdl->device;
+       netdev = iwdev->ldev->netdev;
+       if (netdev != event_netdev)
+               return NOTIFY_DONE;
+
+       switch (event) {
+       case NETDEV_DOWN:
+               i40iw_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32);
+               i40iw_manage_arp_cache(iwdev,
+                                      netdev->dev_addr,
+                                      local_ipaddr6,
+                                      false,
+                                      I40IW_ARP_DELETE);
+               return NOTIFY_OK;
+       case NETDEV_UP:
+               /* Fall through */
+       case NETDEV_CHANGEADDR:
+               i40iw_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32);
+               i40iw_manage_arp_cache(iwdev,
+                                      netdev->dev_addr,
+                                      local_ipaddr6,
+                                      false,
+                                      I40IW_ARP_ADD);
+               break;
+       default:
+               break;
+       }
+       return NOTIFY_DONE;
+}
+
+/**
+ * i40iw_net_event - system notifier for net events
+ * @notifier: not used
+ * @event: event for notifier
+ * @ptr: neighbor
+ */
+int i40iw_net_event(struct notifier_block *notifier, unsigned long event, void *ptr)
+{
+       struct neighbour *neigh = ptr;
+       struct i40iw_device *iwdev;
+       struct i40iw_handler *iwhdl;
+       __be32 *p;
+       u32 local_ipaddr[4];
+
+       switch (event) {
+       case NETEVENT_NEIGH_UPDATE:
+               iwhdl = i40iw_find_netdev((struct net_device *)neigh->dev);
+               if (!iwhdl)
+                       return NOTIFY_DONE;
+               iwdev = &iwhdl->device;
+               p = (__be32 *)neigh->primary_key;
+               i40iw_copy_ip_ntohl(local_ipaddr, p);
+               if (neigh->nud_state & NUD_VALID) {
+                       i40iw_manage_arp_cache(iwdev,
+                                              neigh->ha,
+                                              local_ipaddr,
+                                              false,
+                                              I40IW_ARP_ADD);
+
+               } else {
+                       i40iw_manage_arp_cache(iwdev,
+                                              neigh->ha,
+                                              local_ipaddr,
+                                              false,
+                                              I40IW_ARP_DELETE);
+               }
+               break;
+       default:
+               break;
+       }
+       return NOTIFY_DONE;
+}
+
+/**
+ * i40iw_get_cqp_request - get cqp struct
+ * @cqp: device cqp ptr
+ * @wait: cqp to be used in wait mode
+ */
+struct i40iw_cqp_request *i40iw_get_cqp_request(struct i40iw_cqp *cqp, bool wait)
+{
+       struct i40iw_cqp_request *cqp_request = NULL;
+       unsigned long flags;
+
+       spin_lock_irqsave(&cqp->req_lock, flags);
+       if (!list_empty(&cqp->cqp_avail_reqs)) {
+               cqp_request = list_entry(cqp->cqp_avail_reqs.next,
+                                        struct i40iw_cqp_request, list);
+               list_del_init(&cqp_request->list);
+       }
+       spin_unlock_irqrestore(&cqp->req_lock, flags);
+       if (!cqp_request) {
+               cqp_request = kzalloc(sizeof(*cqp_request), GFP_ATOMIC);
+               if (cqp_request) {
+                       cqp_request->dynamic = true;
+                       INIT_LIST_HEAD(&cqp_request->list);
+                       init_waitqueue_head(&cqp_request->waitq);
+               }
+       }
+       if (!cqp_request) {
+               i40iw_pr_err("CQP Request Fail: No Memory");
+               return NULL;
+       }
+
+       if (wait) {
+               atomic_set(&cqp_request->refcount, 2);
+               cqp_request->waiting = true;
+       } else {
+               atomic_set(&cqp_request->refcount, 1);
+       }
+       return cqp_request;
+}
+
+/**
+ * i40iw_free_cqp_request - free cqp request
+ * @cqp: cqp ptr
+ * @cqp_request: to be put back in cqp list
+ */
+void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request)
+{
+       unsigned long flags;
+
+       if (cqp_request->dynamic) {
+               kfree(cqp_request);
+       } else {
+               cqp_request->request_done = false;
+               cqp_request->callback_fcn = NULL;
+               cqp_request->waiting = false;
+
+               spin_lock_irqsave(&cqp->req_lock, flags);
+               list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs);
+               spin_unlock_irqrestore(&cqp->req_lock, flags);
+       }
+}
+
+/**
+ * i40iw_put_cqp_request - dec ref count and free if 0
+ * @cqp: cqp ptr
+ * @cqp_request: to be put back in cqp list
+ */
+void i40iw_put_cqp_request(struct i40iw_cqp *cqp,
+                          struct i40iw_cqp_request *cqp_request)
+{
+       if (atomic_dec_and_test(&cqp_request->refcount))
+               i40iw_free_cqp_request(cqp, cqp_request);
+}
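
When a caller asks to wait, i40iw_get_cqp_request starts the refcount at 2 so the request stays alive until both the CQP completion path and the waiting caller have dropped their references; a minimal userspace sketch of that lifecycle (plain int in place of atomic_t, hypothetical names):

    #include <stdio.h>

    struct req {
            int refcount;
            int freed;
    };

    static void put_request(struct req *r)
    {
            if (--r->refcount == 0) {
                    r->freed = 1;
                    printf("request freed\n");
            }
    }

    int main(void)
    {
            struct req r = { .refcount = 2, .freed = 0 };

            put_request(&r);        /* completion handler drops its reference */
            put_request(&r);        /* waiter drops the last reference        */
            return r.freed ? 0 : 1;
    }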
+
+/**
+ * i40iw_free_qp - callback after qp destroy cqp op completes
+ * @cqp_request: cqp request for destroy qp
+ * @num: not used
+ */
+static void i40iw_free_qp(struct i40iw_cqp_request *cqp_request, u32 num)
+{
+       struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)cqp_request->param;
+       struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp;
+       struct i40iw_device *iwdev;
+       u32 qp_num = iwqp->ibqp.qp_num;
+
+       iwdev = iwqp->iwdev;
+
+       i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
+       i40iw_free_qp_resources(iwdev, iwqp, qp_num);
+}
+
+/**
+ * i40iw_wait_event - wait for completion
+ * @iwdev: iwarp device
+ * @cqp_request: cqp request to wait
+ */
+static int i40iw_wait_event(struct i40iw_device *iwdev,
+                           struct i40iw_cqp_request *cqp_request)
+{
+       struct cqp_commands_info *info = &cqp_request->info;
+       struct i40iw_cqp *iwcqp = &iwdev->cqp;
+       bool cqp_error = false;
+       int err_code = 0;
+       int timeout_ret = 0;
+
+       timeout_ret = wait_event_timeout(cqp_request->waitq,
+                                        cqp_request->request_done,
+                                        I40IW_EVENT_TIMEOUT);
+       if (!timeout_ret) {
+               i40iw_pr_err("error cqp command 0x%x timed out ret = %d\n",
+                            info->cqp_cmd, timeout_ret);
+               err_code = -ETIME;
+               i40iw_request_reset(iwdev);
+               goto done;
+       }
+       cqp_error = cqp_request->compl_info.error;
+       if (cqp_error) {
+               i40iw_pr_err("error cqp command 0x%x completion maj = 0x%x min=0x%x\n",
+                            info->cqp_cmd, cqp_request->compl_info.maj_err_code,
+                            cqp_request->compl_info.min_err_code);
+               err_code = -EPROTO;
+               goto done;
+       }
+done:
+       i40iw_put_cqp_request(iwcqp, cqp_request);
+       return err_code;
+}
+
+/**
+ * i40iw_handle_cqp_op - process cqp command
+ * @iwdev: iwarp device
+ * @cqp_request: cqp request to process
+ */
+enum i40iw_status_code i40iw_handle_cqp_op(struct i40iw_device *iwdev,
+                                          struct i40iw_cqp_request
+                                          *cqp_request)
+{
+       struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+       enum i40iw_status_code status;
+       struct cqp_commands_info *info = &cqp_request->info;
+       int err_code = 0;
+
+       status = i40iw_process_cqp_cmd(dev, info);
+       if (status) {
+               i40iw_pr_err("error cqp command 0x%x failed\n", info->cqp_cmd);
+               i40iw_free_cqp_request(&iwdev->cqp, cqp_request);
+               return status;
+       }
+       if (cqp_request->waiting)
+               err_code = i40iw_wait_event(iwdev, cqp_request);
+       if (err_code)
+               status = I40IW_ERR_CQP_COMPL_ERROR;
+       return status;
+}
+
+/**
+ * i40iw_add_pdusecount - add pd refcount
+ * @iwpd: pd for refcount
+ */
+void i40iw_add_pdusecount(struct i40iw_pd *iwpd)
+{
+       atomic_inc(&iwpd->usecount);
+}
+
+/**
+ * i40iw_rem_pdusecount - decrement refcount for pd and free if 0
+ * @iwpd: pd for refcount
+ * @iwdev: iwarp device
+ */
+void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev)
+{
+       if (!atomic_dec_and_test(&iwpd->usecount))
+               return;
+       i40iw_free_resource(iwdev, iwdev->allocated_pds, iwpd->sc_pd.pd_id);
+       kfree(iwpd);
+}
+
+/**
+ * i40iw_add_ref - add refcount for qp
+ * @ibqp: iwarp qp
+ */
+void i40iw_add_ref(struct ib_qp *ibqp)
+{
+       struct i40iw_qp *iwqp = (struct i40iw_qp *)ibqp;
+
+       atomic_inc(&iwqp->refcount);
+}
+
+/**
+ * i40iw_rem_ref - rem refcount for qp and free if 0
+ * @ibqp: iwarp qp
+ */
+void i40iw_rem_ref(struct ib_qp *ibqp)
+{
+       struct i40iw_qp *iwqp;
+       enum i40iw_status_code status;
+       struct i40iw_cqp_request *cqp_request;
+       struct cqp_commands_info *cqp_info;
+       struct i40iw_device *iwdev;
+       u32 qp_num;
+
+       iwqp = to_iwqp(ibqp);
+       if (!atomic_dec_and_test(&iwqp->refcount))
+               return;
+
+       iwdev = iwqp->iwdev;
+       qp_num = iwqp->ibqp.qp_num;
+       iwdev->qp_table[qp_num] = NULL;
+       cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
+       if (!cqp_request)
+               return;
+
+       cqp_request->callback_fcn = i40iw_free_qp;
+       cqp_request->param = (void *)&iwqp->sc_qp;
+       cqp_info = &cqp_request->info;
+       cqp_info->cqp_cmd = OP_QP_DESTROY;
+       cqp_info->post_sq = 1;
+       cqp_info->in.u.qp_destroy.qp = &iwqp->sc_qp;
+       cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
+       cqp_info->in.u.qp_destroy.remove_hash_idx = true;
+       status = i40iw_handle_cqp_op(iwdev, cqp_request);
+       if (status)
+               i40iw_pr_err("CQP-OP Destroy QP fail");
+}
+
+/**
+ * i40iw_get_qp - get qp address
+ * @device: iwarp device
+ * @qpn: qp number
+ */
+struct ib_qp *i40iw_get_qp(struct ib_device *device, int qpn)
+{
+       struct i40iw_device *iwdev = to_iwdev(device);
+
+       if ((qpn < IW_FIRST_QPN) || (qpn >= iwdev->max_qp))
+               return NULL;
+
+       return &iwdev->qp_table[qpn]->ibqp;
+}
+
+/**
+ * i40iw_debug_buf - print debug msg and buffer if mask is set
+ * @dev: hardware control device structure
+ * @mask: mask to compare against to decide whether to print
+ * @desc: description string printed before the buffer
+ * @buf: points to buffer addr
+ * @size: size of buffer to print
+ */
+void i40iw_debug_buf(struct i40iw_sc_dev *dev,
+                    enum i40iw_debug_flag mask,
+                    char *desc,
+                    u64 *buf,
+                    u32 size)
+{
+       u32 i;
+
+       if (!(dev->debug_mask & mask))
+               return;
+       i40iw_debug(dev, mask, "%s\n", desc);
+       i40iw_debug(dev, mask, "starting address virt=%p phy=%llxh\n", buf,
+                   (unsigned long long)virt_to_phys(buf));
+
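+       /* size is in bytes; dump the buffer as 64-bit words */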
+       for (i = 0; i < size; i += 8)
+               i40iw_debug(dev, mask, "index %03d val: %016llx\n", i, buf[i / 8]);
+}
+
+/**
+ * i40iw_get_hw_addr - return hw addr
+ * @par: points to shared dev
+ */
+u8 __iomem *i40iw_get_hw_addr(void *par)
+{
+       struct i40iw_sc_dev *dev = (struct i40iw_sc_dev *)par;
+
+       return dev->hw->hw_addr;
+}
+
+/**
+ * i40iw_remove_head - return head entry and remove from list
+ * @list: list for entry
+ */
+void *i40iw_remove_head(struct list_head *list)
+{
+       struct list_head *entry;
+
+       if (list_empty(list))
+               return NULL;
+
+       entry = (void *)list->next;
+       list_del(entry);
+       return (void *)entry;
+}
+
+/**
+ * i40iw_allocate_dma_mem - Memory alloc helper fn
+ * @hw:   pointer to the HW structure
+ * @mem:  ptr to mem struct to fill out
+ * @size: size of memory requested
+ * @alignment: what to align the allocation to
+ */
+enum i40iw_status_code i40iw_allocate_dma_mem(struct i40iw_hw *hw,
+                                             struct i40iw_dma_mem *mem,
+                                             u64 size,
+                                             u32 alignment)
+{
+       struct pci_dev *pcidev = (struct pci_dev *)hw->dev_context;
+
+       if (!mem)
+               return I40IW_ERR_PARAM;
+       mem->size = ALIGN(size, alignment);
+       mem->va = dma_zalloc_coherent(&pcidev->dev, mem->size,
+                                     (dma_addr_t *)&mem->pa, GFP_KERNEL);
+       if (!mem->va)
+               return I40IW_ERR_NO_MEMORY;
+       return 0;
+}
+
+/**
+ * i40iw_free_dma_mem - Memory free helper fn
+ * @hw:   pointer to the HW structure
+ * @mem:  ptr to mem struct to free
+ */
+void i40iw_free_dma_mem(struct i40iw_hw *hw, struct i40iw_dma_mem *mem)
+{
+       struct pci_dev *pcidev = (struct pci_dev *)hw->dev_context;
+
+       if (!mem || !mem->va)
+               return;
+
+       dma_free_coherent(&pcidev->dev, mem->size,
+                         mem->va, (dma_addr_t)mem->pa);
+       mem->va = NULL;
+}
+
+/**
+ * i40iw_allocate_virt_mem - virtual memory alloc helper fn
+ * @hw:   pointer to the HW structure
+ * @mem:  ptr to mem struct to fill out
+ * @size: size of memory requested
+ */
+enum i40iw_status_code i40iw_allocate_virt_mem(struct i40iw_hw *hw,
+                                              struct i40iw_virt_mem *mem,
+                                              u32 size)
+{
+       if (!mem)
+               return I40IW_ERR_PARAM;
+
+       mem->size = size;
+       mem->va = kzalloc(size, GFP_KERNEL);
+
+       if (mem->va)
+               return 0;
+       else
+               return I40IW_ERR_NO_MEMORY;
+}
+
+/**
+ * i40iw_free_virt_mem - virtual memory free helper fn
+ * @hw:   pointer to the HW structure
+ * @mem:  ptr to mem struct to free
+ */
+enum i40iw_status_code i40iw_free_virt_mem(struct i40iw_hw *hw,
+                                          struct i40iw_virt_mem *mem)
+{
+       if (!mem)
+               return I40IW_ERR_PARAM;
+       kfree(mem->va);
+       mem->va = NULL;
+       return 0;
+}
+
+/**
+ * i40iw_cqp_sds_cmd - create cqp command for sd
+ * @dev: hardware control device structure
+ * @sdinfo: information for sd cqp
+ *
+ */
+enum i40iw_status_code i40iw_cqp_sds_cmd(struct i40iw_sc_dev *dev,
+                                        struct i40iw_update_sds_info *sdinfo)
+{
+       enum i40iw_status_code status;
+       struct i40iw_cqp_request *cqp_request;
+       struct cqp_commands_info *cqp_info;
+       struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+
+       cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
+       if (!cqp_request)
+               return I40IW_ERR_NO_MEMORY;
+       cqp_info = &cqp_request->info;
+       memcpy(&cqp_info->in.u.update_pe_sds.info, sdinfo,
+              sizeof(cqp_info->in.u.update_pe_sds.info));
+       cqp_info->cqp_cmd = OP_UPDATE_PE_SDS;
+       cqp_info->post_sq = 1;
+       cqp_info->in.u.update_pe_sds.dev = dev;
+       cqp_info->in.u.update_pe_sds.scratch = (uintptr_t)cqp_request;
+       status = i40iw_handle_cqp_op(iwdev, cqp_request);
+       if (status)
+               i40iw_pr_err("CQP-OP Update SD's fail");
+       return status;
+}
+
+/**
+ * i40iw_term_modify_qp - modify qp for term message
+ * @qp: hardware control qp
+ * @next_state: qp's next state
+ * @term: terminate code
+ * @term_len: length
+ */
+void i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len)
+{
+       struct i40iw_qp *iwqp;
+
+       iwqp = (struct i40iw_qp *)qp->back_qp;
+       i40iw_next_iw_state(iwqp, next_state, 0, term, term_len);
+}
+
+/**
+ * i40iw_terminate_done - after terminate is completed
+ * @qp: hardware control qp
+ * @timeout_occurred: indicates if terminate timer expired
+ */
+void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred)
+{
+       struct i40iw_qp *iwqp;
+       u32 next_iwarp_state = I40IW_QP_STATE_ERROR;
+       u8 hte = 0;
+       bool first_time;
+       unsigned long flags;
+
+       iwqp = (struct i40iw_qp *)qp->back_qp;
+       spin_lock_irqsave(&iwqp->lock, flags);
+       if (iwqp->hte_added) {
+               iwqp->hte_added = 0;
+               hte = 1;
+       }
+       first_time = !(qp->term_flags & I40IW_TERM_DONE);
+       qp->term_flags |= I40IW_TERM_DONE;
+       spin_unlock_irqrestore(&iwqp->lock, flags);
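+       /*
+        * Only the first terminate completion (done or timeout) changes the
+        * QP state and triggers the disconnect.
+        */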
+       if (first_time) {
+               if (!timeout_occurred)
+                       i40iw_terminate_del_timer(qp);
+               else
+                       next_iwarp_state = I40IW_QP_STATE_CLOSING;
+
+               i40iw_next_iw_state(iwqp, next_iwarp_state, hte, 0, 0);
+               i40iw_cm_disconn(iwqp);
+       }
+}
+
+/**
+ * i40iw_terminate_timeout - timeout happened
+ * @context: points to iwarp qp
+ */
+static void i40iw_terminate_timeout(unsigned long context)
+{
+       struct i40iw_qp *iwqp = (struct i40iw_qp *)context;
+       struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)&iwqp->sc_qp;
+
+       i40iw_terminate_done(qp, 1);
+}
+
+/**
+ * i40iw_terminate_start_timer - start terminate timeout
+ * @qp: hardware control qp
+ */
+void i40iw_terminate_start_timer(struct i40iw_sc_qp *qp)
+{
+       struct i40iw_qp *iwqp;
+
+       iwqp = (struct i40iw_qp *)qp->back_qp;
+       init_timer(&iwqp->terminate_timer);
+       iwqp->terminate_timer.function = i40iw_terminate_timeout;
+       iwqp->terminate_timer.expires = jiffies + HZ;
+       iwqp->terminate_timer.data = (unsigned long)iwqp;
+       add_timer(&iwqp->terminate_timer);
+}
+
+/**
+ * i40iw_terminate_del_timer - delete terminate timeout
+ * @qp: hardware control qp
+ */
+void i40iw_terminate_del_timer(struct i40iw_sc_qp *qp)
+{
+       struct i40iw_qp *iwqp;
+
+       iwqp = (struct i40iw_qp *)qp->back_qp;
+       del_timer(&iwqp->terminate_timer);
+}
+
+/**
+ * i40iw_cqp_generic_worker - generic worker for cqp
+ * @work: work pointer
+ */
+static void i40iw_cqp_generic_worker(struct work_struct *work)
+{
+       struct i40iw_virtchnl_work_info *work_info =
+           &((struct virtchnl_work *)work)->work_info;
+
+       if (work_info->worker_vf_dev)
+               work_info->callback_fcn(work_info->worker_vf_dev);
+}
+
+/**
+ * i40iw_cqp_spawn_worker - spawn worker thread
+ * @dev: device struct pointer
+ * @work_info: work request info
+ * @iw_vf_idx: virtual function index
+ */
+void i40iw_cqp_spawn_worker(struct i40iw_sc_dev *dev,
+                           struct i40iw_virtchnl_work_info *work_info,
+                           u32 iw_vf_idx)
+{
+       struct virtchnl_work *work;
+       struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+
+       work = &iwdev->virtchnl_w[iw_vf_idx];
+       memcpy(&work->work_info, work_info, sizeof(*work_info));
+       INIT_WORK(&work->work, i40iw_cqp_generic_worker);
+       queue_work(iwdev->virtchnl_wq, &work->work);
+}
+
+/**
+ * i40iw_cqp_manage_hmc_fcn_worker - run hmc function callback after cqp completion
+ * @work: work pointer for hmc info
+ */
+static void i40iw_cqp_manage_hmc_fcn_worker(struct work_struct *work)
+{
+       struct i40iw_cqp_request *cqp_request =
+           ((struct virtchnl_work *)work)->cqp_request;
+       struct i40iw_ccq_cqe_info ccq_cqe_info;
+       struct i40iw_hmc_fcn_info *hmcfcninfo =
+                       &cqp_request->info.in.u.manage_hmc_pm.info;
+       struct i40iw_device *iwdev =
+           (struct i40iw_device *)cqp_request->info.in.u.manage_hmc_pm.dev->back_dev;
+
+       ccq_cqe_info.cqp = NULL;
+       ccq_cqe_info.maj_err_code = cqp_request->compl_info.maj_err_code;
+       ccq_cqe_info.min_err_code = cqp_request->compl_info.min_err_code;
+       ccq_cqe_info.op_code = cqp_request->compl_info.op_code;
+       ccq_cqe_info.op_ret_val = cqp_request->compl_info.op_ret_val;
+       ccq_cqe_info.scratch = 0;
+       ccq_cqe_info.error = cqp_request->compl_info.error;
+       hmcfcninfo->callback_fcn(cqp_request->info.in.u.manage_hmc_pm.dev,
+                                hmcfcninfo->cqp_callback_param, &ccq_cqe_info);
+       i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
+}
+
+/**
+ * i40iw_cqp_manage_hmc_fcn_callback - called function after cqp completion
+ * @cqp_request: cqp request info struct for hmc fun
+ * @unused: unused param of callback
+ */
+static void i40iw_cqp_manage_hmc_fcn_callback(struct i40iw_cqp_request *cqp_request,
+                                             u32 unused)
+{
+       struct virtchnl_work *work;
+       struct i40iw_hmc_fcn_info *hmcfcninfo =
+           &cqp_request->info.in.u.manage_hmc_pm.info;
+       struct i40iw_device *iwdev =
+           (struct i40iw_device *)cqp_request->info.in.u.manage_hmc_pm.dev->
+           back_dev;
+
+       if (hmcfcninfo && hmcfcninfo->callback_fcn) {
+               i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s1\n", __func__);
+               atomic_inc(&cqp_request->refcount);
+               work = &iwdev->virtchnl_w[hmcfcninfo->iw_vf_idx];
+               work->cqp_request = cqp_request;
+               INIT_WORK(&work->work, i40iw_cqp_manage_hmc_fcn_worker);
+               queue_work(iwdev->virtchnl_wq, &work->work);
+               i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s2\n", __func__);
+       } else {
+               i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s: Something wrong\n", __func__);
+       }
+}
+
+/**
+ * i40iw_cqp_manage_hmc_fcn_cmd - issue cqp command to manage hmc
+ * @dev: hardware control device structure
+ * @hmcfcninfo: info for hmc
+ */
+enum i40iw_status_code i40iw_cqp_manage_hmc_fcn_cmd(struct i40iw_sc_dev *dev,
+                                                   struct i40iw_hmc_fcn_info *hmcfcninfo)
+{
+       enum i40iw_status_code status;
+       struct i40iw_cqp_request *cqp_request;
+       struct cqp_commands_info *cqp_info;
+       struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+
+       i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s\n", __func__);
+       cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
+       if (!cqp_request)
+               return I40IW_ERR_NO_MEMORY;
+       cqp_info = &cqp_request->info;
+       cqp_request->callback_fcn = i40iw_cqp_manage_hmc_fcn_callback;
+       cqp_request->param = hmcfcninfo;
+       memcpy(&cqp_info->in.u.manage_hmc_pm.info, hmcfcninfo,
+              sizeof(*hmcfcninfo));
+       cqp_info->in.u.manage_hmc_pm.dev = dev;
+       cqp_info->cqp_cmd = OP_MANAGE_HMC_PM_FUNC_TABLE;
+       cqp_info->post_sq = 1;
+       cqp_info->in.u.manage_hmc_pm.scratch = (uintptr_t)cqp_request;
+       status = i40iw_handle_cqp_op(iwdev, cqp_request);
+       if (status)
+               i40iw_pr_err("CQP-OP Manage HMC fail");
+       return status;
+}
+
+/**
+ * i40iw_cqp_query_fpm_values_cmd - send cqp command for fpm
+ * @dev: hardware control device structure
+ * @values_mem: buffer for fpm
+ * @hmc_fn_id: function id for fpm
+ */
+enum i40iw_status_code i40iw_cqp_query_fpm_values_cmd(struct i40iw_sc_dev *dev,
+                                                     struct i40iw_dma_mem *values_mem,
+                                                     u8 hmc_fn_id)
+{
+       enum i40iw_status_code status;
+       struct i40iw_cqp_request *cqp_request;
+       struct cqp_commands_info *cqp_info;
+       struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+
+       cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
+       if (!cqp_request)
+               return I40IW_ERR_NO_MEMORY;
+       cqp_info = &cqp_request->info;
+       cqp_request->param = NULL;
+       cqp_info->in.u.query_fpm_values.cqp = dev->cqp;
+       cqp_info->in.u.query_fpm_values.fpm_values_pa = values_mem->pa;
+       cqp_info->in.u.query_fpm_values.fpm_values_va = values_mem->va;
+       cqp_info->in.u.query_fpm_values.hmc_fn_id = hmc_fn_id;
+       cqp_info->cqp_cmd = OP_QUERY_FPM_VALUES;
+       cqp_info->post_sq = 1;
+       cqp_info->in.u.query_fpm_values.scratch = (uintptr_t)cqp_request;
+       status = i40iw_handle_cqp_op(iwdev, cqp_request);
+       if (status)
+               i40iw_pr_err("CQP-OP Query FPM fail");
+       return status;
+}
+
+/**
+ * i40iw_cqp_commit_fpm_values_cmd - commit fpm values in hw
+ * @dev: hardware control device structure
+ * @values_mem: buffer with fpm values
+ * @hmc_fn_id: function id for fpm
+ */
+enum i40iw_status_code i40iw_cqp_commit_fpm_values_cmd(struct i40iw_sc_dev *dev,
+                                                      struct i40iw_dma_mem *values_mem,
+                                                      u8 hmc_fn_id)
+{
+       enum i40iw_status_code status;
+       struct i40iw_cqp_request *cqp_request;
+       struct cqp_commands_info *cqp_info;
+       struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+
+       cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
+       if (!cqp_request)
+               return I40IW_ERR_NO_MEMORY;
+       cqp_info = &cqp_request->info;
+       cqp_request->param = NULL;
+       cqp_info->in.u.commit_fpm_values.cqp = dev->cqp;
+       cqp_info->in.u.commit_fpm_values.fpm_values_pa = values_mem->pa;
+       cqp_info->in.u.commit_fpm_values.fpm_values_va = values_mem->va;
+       cqp_info->in.u.commit_fpm_values.hmc_fn_id = hmc_fn_id;
+       cqp_info->cqp_cmd = OP_COMMIT_FPM_VALUES;
+       cqp_info->post_sq = 1;
+       cqp_info->in.u.commit_fpm_values.scratch = (uintptr_t)cqp_request;
+       status = i40iw_handle_cqp_op(iwdev, cqp_request);
+       if (status)
+               i40iw_pr_err("CQP-OP Commit FPM fail");
+       return status;
+}
+
+/**
+ * i40iw_vf_wait_vchnl_resp - wait for channel msg
+ * @dev: hardware control device structure
+ */
+enum i40iw_status_code i40iw_vf_wait_vchnl_resp(struct i40iw_sc_dev *dev)
+{
+       struct i40iw_device *iwdev = dev->back_dev;
+       enum i40iw_status_code err_code = 0;
+       int timeout_ret;
+
+       i40iw_debug(dev, I40IW_DEBUG_VIRT, "%s[%u] dev %p, iwdev %p\n",
+                   __func__, __LINE__, dev, iwdev);
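+       /*
+        * Bump the count by 2 and wait for it to drop to 1; the virtual
+        * channel receive path presumably decrements it and wakes
+        * vchnl_waitq when the response arrives.
+        */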
+       atomic_add(2, &iwdev->vchnl_msgs);
+       timeout_ret = wait_event_timeout(iwdev->vchnl_waitq,
+                                        (atomic_read(&iwdev->vchnl_msgs) == 1),
+                                        I40IW_VCHNL_EVENT_TIMEOUT);
+       atomic_dec(&iwdev->vchnl_msgs);
+       if (!timeout_ret) {
+               i40iw_pr_err("virt channel completion timeout = 0x%x\n", timeout_ret);
+               err_code = I40IW_ERR_TIMEOUT;
+       }
+       return err_code;
+}
+
+/**
+ * i40iw_ieq_mpa_crc_ae - generate AE for crc error
+ * @dev: hardware control device structure
+ * @qp: hardware control qp
+ */
+void i40iw_ieq_mpa_crc_ae(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
+{
+       struct i40iw_qp_flush_info info;
+       struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+
+       i40iw_debug(dev, I40IW_DEBUG_AEQ, "%s entered\n", __func__);
+       memset(&info, 0, sizeof(info));
+       info.ae_code = I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR;
+       info.generate_ae = true;
+       info.ae_source = 0x3;
+       (void)i40iw_hw_flush_wqes(iwdev, qp, &info, false);
+}
+
+/**
+ * i40iw_init_hash_desc - initialize hash for crc calculation
+ * @desc: address of shash descriptor to allocate and return
+ */
+enum i40iw_status_code i40iw_init_hash_desc(struct shash_desc **desc)
+{
+       struct crypto_shash *tfm;
+       struct shash_desc *tdesc;
+
+       tfm = crypto_alloc_shash("crc32c", 0, 0);
+       if (IS_ERR(tfm))
+               return I40IW_ERR_MPA_CRC;
+
+       tdesc = kzalloc(sizeof(*tdesc) + crypto_shash_descsize(tfm),
+                       GFP_KERNEL);
+       if (!tdesc) {
+               crypto_free_shash(tfm);
+               return I40IW_ERR_MPA_CRC;
+       }
+       tdesc->tfm = tfm;
+       *desc = tdesc;
+
+       return 0;
+}
+
+/**
+ * i40iw_free_hash_desc - free hash desc
+ * @desc: to be freed
+ */
+void i40iw_free_hash_desc(struct shash_desc *desc)
+{
+       if (desc) {
+               crypto_free_shash(desc->tfm);
+               kfree(desc);
+       }
+}
+
+/**
+ * i40iw_alloc_query_fpm_buf - allocate buffer for fpm
+ * @dev: hardware control device structure
+ * @mem: buffer ptr for fpm to be allocated
+ * @return: memory allocation status
+ */
+enum i40iw_status_code i40iw_alloc_query_fpm_buf(struct i40iw_sc_dev *dev,
+                                                struct i40iw_dma_mem *mem)
+{
+       enum i40iw_status_code status;
+       struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+
+       status = i40iw_obj_aligned_mem(iwdev, mem, I40IW_QUERY_FPM_BUF_SIZE,
+                                      I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK);
+       return status;
+}
+
+/**
+ * i40iw_ieq_check_mpacrc - check if mpa crc is OK
+ * @desc: desc for hash
+ * @addr: address of buffer for crc
+ * @length: length of buffer
+ * @value: value to be compared
+ */
+enum i40iw_status_code i40iw_ieq_check_mpacrc(struct shash_desc *desc,
+                                             void *addr,
+                                             u32 length,
+                                             u32 value)
+{
+       u32 crc = 0;
+       int ret;
+       enum i40iw_status_code ret_code = 0;
+
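+       /*
+        * Compute CRC32c (via the crc32c shash set up in
+        * i40iw_init_hash_desc) over the buffer and compare it with the
+        * received MPA CRC.
+        */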
+       crypto_shash_init(desc);
+       ret = crypto_shash_update(desc, addr, length);
+       if (!ret)
+               crypto_shash_final(desc, (u8 *)&crc);
+       if (crc != value) {
+               i40iw_pr_err("mpa crc check fail\n");
+               ret_code = I40IW_ERR_MPA_CRC;
+       }
+       return ret_code;
+}
+
+/**
+ * i40iw_ieq_get_qp - get qp based on quad in puda buffer
+ * @dev: hardware control device structure
+ * @buf: receive puda buffer on exception q
+ */
+struct i40iw_sc_qp *i40iw_ieq_get_qp(struct i40iw_sc_dev *dev,
+                                    struct i40iw_puda_buf *buf)
+{
+       struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+       struct i40iw_qp *iwqp;
+       struct i40iw_cm_node *cm_node;
+       u32 loc_addr[4], rem_addr[4];
+       u16 loc_port, rem_port;
+       struct ipv6hdr *ip6h;
+       struct iphdr *iph = (struct iphdr *)buf->iph;
+       struct tcphdr *tcph = (struct tcphdr *)buf->tcph;
+
+       if (iph->version == 4) {
+               memset(loc_addr, 0, sizeof(loc_addr));
+               loc_addr[0] = ntohl(iph->daddr);
+               memset(rem_addr, 0, sizeof(rem_addr));
+               rem_addr[0] = ntohl(iph->saddr);
+       } else {
+               ip6h = (struct ipv6hdr *)buf->iph;
+               i40iw_copy_ip_ntohl(loc_addr, ip6h->daddr.in6_u.u6_addr32);
+               i40iw_copy_ip_ntohl(rem_addr, ip6h->saddr.in6_u.u6_addr32);
+       }
+       loc_port = ntohs(tcph->dest);
+       rem_port = ntohs(tcph->source);
+
+       cm_node = i40iw_find_node(&iwdev->cm_core, rem_port, rem_addr, loc_port,
+                                 loc_addr, false);
+       if (!cm_node)
+               return NULL;
+       iwqp = cm_node->iwqp;
+       return &iwqp->sc_qp;
+}
+
+/**
+ * i40iw_ieq_update_tcpip_info - update tcpip in the buffer
+ * @buf: puda to update
+ * @length: length of buffer
+ * @seqnum: seq number for tcp
+ */
+void i40iw_ieq_update_tcpip_info(struct i40iw_puda_buf *buf, u16 length, u32 seqnum)
+{
+       struct tcphdr *tcph;
+       struct iphdr *iph;
+       u16 iphlen;
+       u16 packetsize;
+       u8 *addr = (u8 *)buf->mem.va;
+
+       iphlen = (buf->ipv4) ? 20 : 40;
+       iph = (struct iphdr *)(addr + buf->maclen);
+       tcph = (struct tcphdr *)(addr + buf->maclen + iphlen);
+       packetsize = length + buf->tcphlen + iphlen;
+
+       iph->tot_len = htons(packetsize);
+       tcph->seq = htonl(seqnum);
+}
+
+/**
+ * i40iw_puda_get_tcpip_info - get tcpip info from puda buffer
+ * @info: puda completion info
+ * @buf: puda buffer
+ */
+enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_info *info,
+                                                struct i40iw_puda_buf *buf)
+{
+       struct iphdr *iph;
+       struct ipv6hdr *ip6h;
+       struct tcphdr *tcph;
+       u16 iphlen;
+       u16 pkt_len;
+       u8 *mem = (u8 *)buf->mem.va;
+       struct ethhdr *ethh = (struct ethhdr *)buf->mem.va;
+
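+       /* 0x8100 is the 802.1Q ethertype; keep the 12-bit VLAN id */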
+       if (ethh->h_proto == htons(0x8100)) {
+               info->vlan_valid = true;
+               buf->vlan_id = ntohs(((struct vlan_ethhdr *)ethh)->h_vlan_TCI) & VLAN_VID_MASK;
+       }
+       buf->maclen = (info->vlan_valid) ? 18 : 14;
+       iphlen = (info->l3proto) ? 40 : 20;
+       buf->ipv4 = (info->l3proto) ? false : true;
+       buf->iph = mem + buf->maclen;
+       iph = (struct iphdr *)buf->iph;
+
+       buf->tcph = buf->iph + iphlen;
+       tcph = (struct tcphdr *)buf->tcph;
+
+       if (buf->ipv4) {
+               pkt_len = ntohs(iph->tot_len);
+       } else {
+               ip6h = (struct ipv6hdr *)buf->iph;
+               pkt_len = ntohs(ip6h->payload_len) + iphlen;
+       }
+
+       buf->totallen = pkt_len + buf->maclen;
+
+       if (info->payload_len < buf->totallen - 4) {
+               i40iw_pr_err("payload_len = 0x%x totallen expected 0x%x\n",
+                            info->payload_len, buf->totallen);
+               return I40IW_ERR_INVALID_SIZE;
+       }
+
+       buf->tcphlen = (tcph->doff) << 2;
+       buf->datalen = pkt_len - iphlen - buf->tcphlen;
+       buf->data = (buf->datalen) ? buf->tcph + buf->tcphlen : NULL;
+       buf->hdrlen = buf->maclen + iphlen + buf->tcphlen;
+       buf->seqnum = ntohl(tcph->seq);
+       return 0;
+}
+
+/**
+ * i40iw_hw_stats_timeout - Stats timer-handler which updates all HW stats
+ * @dev: hardware control device structure
+ */
+static void i40iw_hw_stats_timeout(unsigned long dev)
+{
+       struct i40iw_sc_dev *pf_dev = (struct i40iw_sc_dev *)dev;
+       struct i40iw_dev_pestat *pf_devstat = &pf_dev->dev_pestat;
+       struct i40iw_dev_pestat *vf_devstat = NULL;
+       u16 iw_vf_idx;
+       unsigned long flags;
+
+       /* PF */
+       pf_devstat->ops.iw_hw_stat_read_all(pf_devstat, &pf_devstat->hw_stats);
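+       /*
+        * Refresh stats for every VF that has initialized its stats,
+        * under the PF stats lock.
+        */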
+       for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT; iw_vf_idx++) {
+               spin_lock_irqsave(&pf_devstat->stats_lock, flags);
+               if (pf_dev->vf_dev[iw_vf_idx]) {
+                       if (pf_dev->vf_dev[iw_vf_idx]->stats_initialized) {
+                               vf_devstat = &pf_dev->vf_dev[iw_vf_idx]->dev_pestat;
+                               vf_devstat->ops.iw_hw_stat_read_all(vf_devstat, &vf_devstat->hw_stats);
+                       }
+               }
+               spin_unlock_irqrestore(&pf_devstat->stats_lock, flags);
+       }
+
+       mod_timer(&pf_devstat->stats_timer,
+                 jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
+}
+
+/**
+ * i40iw_hw_stats_start_timer - Start periodic stats timer
+ * @dev: hardware control device structure
+ */
+void i40iw_hw_stats_start_timer(struct i40iw_sc_dev *dev)
+{
+       struct i40iw_dev_pestat *devstat = &dev->dev_pestat;
+
+       init_timer(&devstat->stats_timer);
+       devstat->stats_timer.function = i40iw_hw_stats_timeout;
+       devstat->stats_timer.data = (unsigned long)dev;
+       mod_timer(&devstat->stats_timer,
+                 jiffies + msecs_to_jiffies(STATS_TIMER_DELAY));
+}
+
+/**
+ * i40iw_hw_stats_del_timer - Delete periodic stats timer
+ * @dev: hardware control device structure
+ */
+void i40iw_hw_stats_del_timer(struct i40iw_sc_dev *dev)
+{
+       struct i40iw_dev_pestat *devstat = &dev->dev_pestat;
+
+       del_timer_sync(&devstat->stats_timer);
+}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
new file mode 100644 (file)
index 0000000..1fe3b84
--- /dev/null
@@ -0,0 +1,2437 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses.  You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+*   Redistribution and use in source and binary forms, with or
+*   without modification, are permitted provided that the following
+*   conditions are met:
+*
+*    - Redistributions of source code must retain the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer.
+*
+*    - Redistributions in binary form must reproduce the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer in the documentation and/or other materials
+*      provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/random.h>
+#include <linux/highmem.h>
+#include <linux/time.h>
+#include <asm/byteorder.h>
+#include <net/ip.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/iw_cm.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_umem.h>
+#include "i40iw.h"
+
+/**
+ * i40iw_query_device - get device attributes
+ * @ibdev: device pointer from stack
+ * @props: returning device attributes
+ * @udata: user data
+ */
+static int i40iw_query_device(struct ib_device *ibdev,
+                             struct ib_device_attr *props,
+                             struct ib_udata *udata)
+{
+       struct i40iw_device *iwdev = to_iwdev(ibdev);
+
+       if (udata->inlen || udata->outlen)
+               return -EINVAL;
+       memset(props, 0, sizeof(*props));
+       ether_addr_copy((u8 *)&props->sys_image_guid, iwdev->netdev->dev_addr);
+       props->fw_ver = I40IW_FW_VERSION;
+       props->device_cap_flags = iwdev->device_cap_flags;
+       props->vendor_id = iwdev->vendor_id;
+       props->vendor_part_id = iwdev->vendor_part_id;
+       props->hw_ver = (u32)iwdev->sc_dev.hw_rev;
+       props->max_mr_size = I40IW_MAX_OUTBOUND_MESSAGE_SIZE;
+       props->max_qp = iwdev->max_qp;
+       props->max_qp_wr = (I40IW_MAX_WQ_ENTRIES >> 2) - 1;
+       props->max_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
+       props->max_cq = iwdev->max_cq;
+       props->max_cqe = iwdev->max_cqe;
+       props->max_mr = iwdev->max_mr;
+       props->max_pd = iwdev->max_pd;
+       props->max_sge_rd = 1;
+       props->max_qp_rd_atom = I40IW_MAX_IRD_SIZE;
+       props->max_qp_init_rd_atom = props->max_qp_rd_atom;
+       props->atomic_cap = IB_ATOMIC_NONE;
+       props->max_map_per_fmr = 1;
+       return 0;
+}
+
+/**
+ * i40iw_query_port - get port attributes
+ * @ibdev: device pointer from stack
+ * @port: port number for query
+ * @props: returning device attributes
+ */
+static int i40iw_query_port(struct ib_device *ibdev,
+                           u8 port,
+                           struct ib_port_attr *props)
+{
+       struct i40iw_device *iwdev = to_iwdev(ibdev);
+       struct net_device *netdev = iwdev->netdev;
+
+       memset(props, 0, sizeof(*props));
+
+       props->max_mtu = IB_MTU_4096;
+       if (netdev->mtu >= 4096)
+               props->active_mtu = IB_MTU_4096;
+       else if (netdev->mtu >= 2048)
+               props->active_mtu = IB_MTU_2048;
+       else if (netdev->mtu >= 1024)
+               props->active_mtu = IB_MTU_1024;
+       else if (netdev->mtu >= 512)
+               props->active_mtu = IB_MTU_512;
+       else
+               props->active_mtu = IB_MTU_256;
+
+       props->lid = 1;
+       if (netif_carrier_ok(iwdev->netdev))
+               props->state = IB_PORT_ACTIVE;
+       else
+               props->state = IB_PORT_DOWN;
+       props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
+               IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
+       props->gid_tbl_len = 1;
+       props->pkey_tbl_len = 1;
+       props->active_width = IB_WIDTH_4X;
+       props->active_speed = 1;
+       props->max_msg_sz = 0x80000000;
+       return 0;
+}
+
+/**
+ * i40iw_alloc_ucontext - Allocate the user context data structure
+ * @ibdev: device pointer from stack
+ * @udata: user data
+ *
+ * This keeps track of all objects associated with a particular
+ * user-mode client.
+ */
+static struct ib_ucontext *i40iw_alloc_ucontext(struct ib_device *ibdev,
+                                               struct ib_udata *udata)
+{
+       struct i40iw_device *iwdev = to_iwdev(ibdev);
+       struct i40iw_alloc_ucontext_req req;
+       struct i40iw_alloc_ucontext_resp uresp;
+       struct i40iw_ucontext *ucontext;
+
+       if (ib_copy_from_udata(&req, udata, sizeof(req)))
+               return ERR_PTR(-EINVAL);
+
+       if (req.userspace_ver != I40IW_ABI_USERSPACE_VER) {
+               i40iw_pr_err("Invalid userspace driver version detected. Detected version %d, should be %d\n",
+                            req.userspace_ver, I40IW_ABI_USERSPACE_VER);
+               return ERR_PTR(-EINVAL);
+       }
+
+       memset(&uresp, 0, sizeof(uresp));
+       uresp.max_qps = iwdev->max_qp;
+       uresp.max_pds = iwdev->max_pd;
+       uresp.wq_size = iwdev->max_qp_wr * 2;
+       uresp.kernel_ver = I40IW_ABI_KERNEL_VER;
+
+       ucontext = kzalloc(sizeof(*ucontext), GFP_KERNEL);
+       if (!ucontext)
+               return ERR_PTR(-ENOMEM);
+
+       ucontext->iwdev = iwdev;
+
+       if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
+               kfree(ucontext);
+               return ERR_PTR(-EFAULT);
+       }
+
+       INIT_LIST_HEAD(&ucontext->cq_reg_mem_list);
+       spin_lock_init(&ucontext->cq_reg_mem_list_lock);
+       INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
+       spin_lock_init(&ucontext->qp_reg_mem_list_lock);
+
+       return &ucontext->ibucontext;
+}
+
+/**
+ * i40iw_dealloc_ucontext - deallocate the user context data structure
+ * @context: user context created during alloc
+ */
+static int i40iw_dealloc_ucontext(struct ib_ucontext *context)
+{
+       struct i40iw_ucontext *ucontext = to_ucontext(context);
+       unsigned long flags;
+
+       spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
+       if (!list_empty(&ucontext->cq_reg_mem_list)) {
+               spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
+               return -EBUSY;
+       }
+       spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
+       spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
+       if (!list_empty(&ucontext->qp_reg_mem_list)) {
+               spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
+               return -EBUSY;
+       }
+       spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
+
+       kfree(ucontext);
+       return 0;
+}
+
+/**
+ * i40iw_mmap - user memory map
+ * @context: context created during alloc
+ * @vma: kernel info for user memory map
+ */
+static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
+{
+       struct i40iw_ucontext *ucontext;
+       u64 db_addr_offset;
+       u64 push_offset;
+
+       ucontext = to_ucontext(context);
+       if (ucontext->iwdev->sc_dev.is_pf) {
+               db_addr_offset = I40IW_DB_ADDR_OFFSET;
+               push_offset = I40IW_PUSH_OFFSET;
+               if (vma->vm_pgoff)
+                       vma->vm_pgoff += I40IW_PF_FIRST_PUSH_PAGE_INDEX - 1;
+       } else {
+               db_addr_offset = I40IW_VF_DB_ADDR_OFFSET;
+               push_offset = I40IW_VF_PUSH_OFFSET;
+               if (vma->vm_pgoff)
+                       vma->vm_pgoff += I40IW_VF_FIRST_PUSH_PAGE_INDEX - 1;
+       }
+
+       vma->vm_pgoff += db_addr_offset >> PAGE_SHIFT;
+
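+       /*
+        * An offset equal to the doorbell page maps the doorbell uncached;
+        * any other offset selects a push page, mapped uncached or
+        * write-combined depending on its parity relative to the push base.
+        */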
+       if (vma->vm_pgoff == (db_addr_offset >> PAGE_SHIFT)) {
+               vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+               vma->vm_private_data = ucontext;
+       } else {
+               if ((vma->vm_pgoff - (push_offset >> PAGE_SHIFT)) % 2)
+                       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+               else
+                       vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+       }
+
+       if (io_remap_pfn_range(vma, vma->vm_start,
+                              vma->vm_pgoff + (pci_resource_start(ucontext->iwdev->ldev->pcidev, 0) >> PAGE_SHIFT),
+                              PAGE_SIZE, vma->vm_page_prot))
+               return -EAGAIN;
+
+       return 0;
+}
+
+/**
+ * i40iw_alloc_push_page - allocate a push page for qp
+ * @iwdev: iwarp device
+ * @qp: hardware control qp
+ */
+static void i40iw_alloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp *qp)
+{
+       struct i40iw_cqp_request *cqp_request;
+       struct cqp_commands_info *cqp_info;
+       struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+       enum i40iw_status_code status;
+
+       if (qp->push_idx != I40IW_INVALID_PUSH_PAGE_INDEX)
+               return;
+
+       cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
+       if (!cqp_request)
+               return;
+
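+       /*
+        * Take an extra reference so the request (and its compl_info) stays
+        * valid while the returned push index is read below.
+        */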
+       atomic_inc(&cqp_request->refcount);
+
+       cqp_info = &cqp_request->info;
+       cqp_info->cqp_cmd = OP_MANAGE_PUSH_PAGE;
+       cqp_info->post_sq = 1;
+
+       cqp_info->in.u.manage_push_page.info.qs_handle = dev->qs_handle;
+       cqp_info->in.u.manage_push_page.info.free_page = 0;
+       cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
+       cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
+
+       status = i40iw_handle_cqp_op(iwdev, cqp_request);
+       if (!status)
+               qp->push_idx = cqp_request->compl_info.op_ret_val;
+       else
+               i40iw_pr_err("CQP-OP Push page fail");
+       i40iw_put_cqp_request(&iwdev->cqp, cqp_request);
+}
+
+/**
+ * i40iw_dealloc_push_page - free a push page for qp
+ * @iwdev: iwarp device
+ * @qp: hardware control qp
+ */
+static void i40iw_dealloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp *qp)
+{
+       struct i40iw_cqp_request *cqp_request;
+       struct cqp_commands_info *cqp_info;
+       struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+       enum i40iw_status_code status;
+
+       if (qp->push_idx == I40IW_INVALID_PUSH_PAGE_INDEX)
+               return;
+
+       cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
+       if (!cqp_request)
+               return;
+
+       cqp_info = &cqp_request->info;
+       cqp_info->cqp_cmd = OP_MANAGE_PUSH_PAGE;
+       cqp_info->post_sq = 1;
+
+       cqp_info->in.u.manage_push_page.info.push_idx = qp->push_idx;
+       cqp_info->in.u.manage_push_page.info.qs_handle = dev->qs_handle;
+       cqp_info->in.u.manage_push_page.info.free_page = 1;
+       cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp;
+       cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request;
+
+       status = i40iw_handle_cqp_op(iwdev, cqp_request);
+       if (!status)
+               qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;
+       else
+               i40iw_pr_err("CQP-OP Push page fail");
+}
+
+/**
+ * i40iw_alloc_pd - allocate protection domain
+ * @ibdev: device pointer from stack
+ * @context: user context created during alloc
+ * @udata: user data
+ */
+static struct ib_pd *i40iw_alloc_pd(struct ib_device *ibdev,
+                                   struct ib_ucontext *context,
+                                   struct ib_udata *udata)
+{
+       struct i40iw_pd *iwpd;
+       struct i40iw_device *iwdev = to_iwdev(ibdev);
+       struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+       struct i40iw_alloc_pd_resp uresp;
+       struct i40iw_sc_pd *sc_pd;
+       u32 pd_id = 0;
+       int err;
+
+       err = i40iw_alloc_resource(iwdev, iwdev->allocated_pds,
+                                  iwdev->max_pd, &pd_id, &iwdev->next_pd);
+       if (err) {
+               i40iw_pr_err("alloc resource failed\n");
+               return ERR_PTR(err);
+       }
+
+       iwpd = kzalloc(sizeof(*iwpd), GFP_KERNEL);
+       if (!iwpd) {
+               err = -ENOMEM;
+               goto free_res;
+       }
+
+       sc_pd = &iwpd->sc_pd;
+       dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id);
+
+       if (context) {
+               memset(&uresp, 0, sizeof(uresp));
+               uresp.pd_id = pd_id;
+               if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
+                       err = -EFAULT;
+                       goto error;
+               }
+       }
+
+       i40iw_add_pdusecount(iwpd);
+       return &iwpd->ibpd;
+error:
+       kfree(iwpd);
+free_res:
+       i40iw_free_resource(iwdev, iwdev->allocated_pds, pd_id);
+       return ERR_PTR(err);
+}
+
+/**
+ * i40iw_dealloc_pd - deallocate pd
+ * @ibpd: ptr of pd to be deallocated
+ */
+static int i40iw_dealloc_pd(struct ib_pd *ibpd)
+{
+       struct i40iw_pd *iwpd = to_iwpd(ibpd);
+       struct i40iw_device *iwdev = to_iwdev(ibpd->device);
+
+       i40iw_rem_pdusecount(iwpd, iwdev);
+       return 0;
+}
+
+/**
+ * i40iw_qp_roundup - return rounded up qp ring size
+ * @wr_ring_size: ring size to round up
+ */
+static int i40iw_qp_roundup(u32 wr_ring_size)
+{
+       int scount = 1;
+
+       if (wr_ring_size < I40IWQP_SW_MIN_WQSIZE)
+               wr_ring_size = I40IWQP_SW_MIN_WQSIZE;
+
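+       /* round up to the next power of two by smearing the top bit down */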
+       for (wr_ring_size--; scount <= 16; scount *= 2)
+               wr_ring_size |= wr_ring_size >> scount;
+       return ++wr_ring_size;
+}
+
+/**
+ * i40iw_get_pbl - Retrieve pbl from a list given a virtual address
+ * @va: user virtual address
+ * @pbl_list: pbl list to search in (QP's or CQ's)
+ */
+static struct i40iw_pbl *i40iw_get_pbl(unsigned long va,
+                                      struct list_head *pbl_list)
+{
+       struct i40iw_pbl *iwpbl;
+
+       list_for_each_entry(iwpbl, pbl_list, list) {
+               if (iwpbl->user_base == va) {
+                       list_del(&iwpbl->list);
+                       return iwpbl;
+               }
+       }
+       return NULL;
+}
+
+/**
+ * i40iw_free_qp_resources - free up memory resources for qp
+ * @iwdev: iwarp device
+ * @iwqp: qp ptr (user or kernel)
+ * @qp_num: qp number assigned
+ */
+void i40iw_free_qp_resources(struct i40iw_device *iwdev,
+                            struct i40iw_qp *iwqp,
+                            u32 qp_num)
+{
+       i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp);
+       if (qp_num)
+               i40iw_free_resource(iwdev, iwdev->allocated_qps, qp_num);
+       i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->q2_ctx_mem);
+       i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->kqp.dma_mem);
+       kfree(iwqp->kqp.wrid_mem);
+       iwqp->kqp.wrid_mem = NULL;
+       kfree(iwqp->allocated_buffer);
+       iwqp->allocated_buffer = NULL;
+}
+
+/**
+ * i40iw_clean_cqes - clean cq entries for qp
+ * @iwqp: qp ptr (user or kernel)
+ * @iwcq: cq ptr
+ */
+static void i40iw_clean_cqes(struct i40iw_qp *iwqp, struct i40iw_cq *iwcq)
+{
+       struct i40iw_cq_uk *ukcq = &iwcq->sc_cq.cq_uk;
+
+       ukcq->ops.iw_cq_clean(&iwqp->sc_qp.qp_uk, ukcq);
+}
+
+/**
+ * i40iw_destroy_qp - destroy qp
+ * @ibqp: qp's ib pointer also to get to device's qp address
+ */
+static int i40iw_destroy_qp(struct ib_qp *ibqp)
+{
+       struct i40iw_qp *iwqp = to_iwqp(ibqp);
+
+       iwqp->destroyed = 1;
+
+       if (iwqp->ibqp_state >= IB_QPS_INIT && iwqp->ibqp_state < IB_QPS_RTS)
+               i40iw_next_iw_state(iwqp, I40IW_QP_STATE_ERROR, 0, 0, 0);
+
+       if (!iwqp->user_mode) {
+               if (iwqp->iwscq) {
+                       i40iw_clean_cqes(iwqp, iwqp->iwscq);
+                       if (iwqp->iwrcq != iwqp->iwscq)
+                               i40iw_clean_cqes(iwqp, iwqp->iwrcq);
+               }
+       }
+
+       i40iw_rem_ref(&iwqp->ibqp);
+       return 0;
+}
+
+/**
+ * i40iw_setup_virt_qp - setup for allocation of virtual qp
+ * @iwdev: iwarp device
+ * @iwqp: qp ptr (user or kernel)
+ * @init_info: initialize info to return
+ */
+static int i40iw_setup_virt_qp(struct i40iw_device *iwdev,
+                              struct i40iw_qp *iwqp,
+                              struct i40iw_qp_init_info *init_info)
+{
+       struct i40iw_pbl *iwpbl = iwqp->iwpbl;
+       struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr;
+
+       iwqp->page = qpmr->sq_page;
+       init_info->shadow_area_pa = cpu_to_le64(qpmr->shadow);
+       if (iwpbl->pbl_allocated) {
+               init_info->virtual_map = true;
+               init_info->sq_pa = qpmr->sq_pbl.idx;
+               init_info->rq_pa = qpmr->rq_pbl.idx;
+       } else {
+               init_info->sq_pa = qpmr->sq_pbl.addr;
+               init_info->rq_pa = qpmr->rq_pbl.addr;
+       }
+       return 0;
+}
+
+/**
+ * i40iw_setup_kmode_qp - setup initialization for kernel mode qp
+ * @iwdev: iwarp device
+ * @iwqp: qp ptr (user or kernel)
+ * @info: initialize info to return
+ */
+static int i40iw_setup_kmode_qp(struct i40iw_device *iwdev,
+                               struct i40iw_qp *iwqp,
+                               struct i40iw_qp_init_info *info)
+{
+       struct i40iw_dma_mem *mem = &iwqp->kqp.dma_mem;
+       u32 sqdepth, rqdepth;
+       u32 sq_size, rq_size;
+       u8 sqshift, rqshift;
+       u32 size;
+       enum i40iw_status_code status;
+       struct i40iw_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
+
+       ukinfo->max_sq_frag_cnt = I40IW_MAX_WQ_FRAGMENT_COUNT;
+
+       sq_size = i40iw_qp_roundup(ukinfo->sq_size + 1);
+       rq_size = i40iw_qp_roundup(ukinfo->rq_size + 1);
+
+       status = i40iw_get_wqe_shift(sq_size, ukinfo->max_sq_frag_cnt, &sqshift);
+       if (!status)
+               status = i40iw_get_wqe_shift(rq_size, ukinfo->max_rq_frag_cnt, &rqshift);
+
+       if (status)
+               return -ENOSYS;
+
+       sqdepth = sq_size << sqshift;
+       rqdepth = rq_size << rqshift;
+
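+       /*
+        * One wr-tracking entry per SQ WQE plus one u64 wrid per RQ WQE
+        * (rqdepth << 3 bytes).
+        */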
+       size = sqdepth * sizeof(struct i40iw_sq_uk_wr_trk_info) + (rqdepth << 3);
+       iwqp->kqp.wrid_mem = kzalloc(size, GFP_KERNEL);
+
+       ukinfo->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)iwqp->kqp.wrid_mem;
+       if (!ukinfo->sq_wrtrk_array)
+               return -ENOMEM;
+
+       ukinfo->rq_wrid_array = (u64 *)&ukinfo->sq_wrtrk_array[sqdepth];
+
+       size = (sqdepth + rqdepth) * I40IW_QP_WQE_MIN_SIZE;
+       size += (I40IW_SHADOW_AREA_SIZE << 3);
+
+       status = i40iw_allocate_dma_mem(iwdev->sc_dev.hw, mem, size, 256);
+       if (status) {
+               kfree(ukinfo->sq_wrtrk_array);
+               ukinfo->sq_wrtrk_array = NULL;
+               return -ENOMEM;
+       }
+
+       ukinfo->sq = mem->va;
+       info->sq_pa = mem->pa;
+
+       ukinfo->rq = &ukinfo->sq[sqdepth];
+       info->rq_pa = info->sq_pa + (sqdepth * I40IW_QP_WQE_MIN_SIZE);
+
+       ukinfo->shadow_area = ukinfo->rq[rqdepth].elem;
+       info->shadow_area_pa = info->rq_pa + (rqdepth * I40IW_QP_WQE_MIN_SIZE);
+
+       ukinfo->sq_size = sq_size;
+       ukinfo->rq_size = rq_size;
+       ukinfo->qp_id = iwqp->ibqp.qp_num;
+       return 0;
+}
+
+/**
+ * i40iw_create_qp - create qp
+ * @ibpd: ptr of pd
+ * @init_attr: attributes for qp
+ * @udata: user data for create qp
+ */
+static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
+                                    struct ib_qp_init_attr *init_attr,
+                                    struct ib_udata *udata)
+{
+       struct i40iw_pd *iwpd = to_iwpd(ibpd);
+       struct i40iw_device *iwdev = to_iwdev(ibpd->device);
+       struct i40iw_cqp *iwcqp = &iwdev->cqp;
+       struct i40iw_qp *iwqp;
+       struct i40iw_ucontext *ucontext;
+       struct i40iw_create_qp_req req;
+       struct i40iw_create_qp_resp uresp;
+       u32 qp_num = 0;
+       void *mem;
+       enum i40iw_status_code ret;
+       int err_code;
+       int sq_size;
+       int rq_size;
+       struct i40iw_sc_qp *qp;
+       struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+       struct i40iw_qp_init_info init_info;
+       struct i40iw_create_qp_info *qp_info;
+       struct i40iw_cqp_request *cqp_request;
+       struct cqp_commands_info *cqp_info;
+
+       struct i40iw_qp_host_ctx_info *ctx_info;
+       struct i40iwarp_offload_info *iwarp_info;
+       unsigned long flags;
+
+       if (init_attr->create_flags)
+               return ERR_PTR(-EINVAL);
+       if (init_attr->cap.max_inline_data > I40IW_MAX_INLINE_DATA_SIZE)
+               init_attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;
+
+       memset(&init_info, 0, sizeof(init_info));
+
+       sq_size = init_attr->cap.max_send_wr;
+       rq_size = init_attr->cap.max_recv_wr;
+
+       init_info.qp_uk_init_info.sq_size = sq_size;
+       init_info.qp_uk_init_info.rq_size = rq_size;
+       init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge;
+       init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge;
+
+       mem = kzalloc(sizeof(*iwqp), GFP_KERNEL);
+       if (!mem)
+               return ERR_PTR(-ENOMEM);
+
+       iwqp = (struct i40iw_qp *)mem;
+       qp = &iwqp->sc_qp;
+       qp->back_qp = (void *)iwqp;
+       qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX;
+
+       iwqp->ctx_info.iwarp_info = &iwqp->iwarp_info;
+
+       if (i40iw_allocate_dma_mem(dev->hw,
+                                  &iwqp->q2_ctx_mem,
+                                  I40IW_Q2_BUFFER_SIZE + I40IW_QP_CTX_SIZE,
+                                  256)) {
+               i40iw_pr_err("dma_mem failed\n");
+               err_code = -ENOMEM;
+               goto error;
+       }
+
+       init_info.q2 = iwqp->q2_ctx_mem.va;
+       init_info.q2_pa = iwqp->q2_ctx_mem.pa;
+
+       init_info.host_ctx = (void *)init_info.q2 + I40IW_Q2_BUFFER_SIZE;
+       init_info.host_ctx_pa = init_info.q2_pa + I40IW_Q2_BUFFER_SIZE;
+
+       err_code = i40iw_alloc_resource(iwdev, iwdev->allocated_qps, iwdev->max_qp,
+                                       &qp_num, &iwdev->next_qp);
+       if (err_code) {
+               i40iw_pr_err("qp resource\n");
+               goto error;
+       }
+
+       iwqp->allocated_buffer = mem;
+       iwqp->iwdev = iwdev;
+       iwqp->iwpd = iwpd;
+       iwqp->ibqp.qp_num = qp_num;
+       qp = &iwqp->sc_qp;
+       iwqp->iwscq = to_iwcq(init_attr->send_cq);
+       iwqp->iwrcq = to_iwcq(init_attr->recv_cq);
+
+       iwqp->host_ctx.va = init_info.host_ctx;
+       iwqp->host_ctx.pa = init_info.host_ctx_pa;
+       iwqp->host_ctx.size = I40IW_QP_CTX_SIZE;
+
+       init_info.pd = &iwpd->sc_pd;
+       init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num;
+       iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
+
+       if (init_attr->qp_type != IB_QPT_RC) {
+               err_code = -ENOSYS;
+               goto error;
+       }
+       if (iwdev->push_mode)
+               i40iw_alloc_push_page(iwdev, qp);
+       if (udata) {
+               err_code = ib_copy_from_udata(&req, udata, sizeof(req));
+               if (err_code) {
+                       i40iw_pr_err("ib_copy_from_udata\n");
+                       goto error;
+               }
+               iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
+               if (ibpd->uobject && ibpd->uobject->context) {
+                       iwqp->user_mode = 1;
+                       ucontext = to_ucontext(ibpd->uobject->context);
+
+                       if (req.user_wqe_buffers) {
+                               spin_lock_irqsave(
+                                   &ucontext->qp_reg_mem_list_lock, flags);
+                               iwqp->iwpbl = i40iw_get_pbl(
+                                   (unsigned long)req.user_wqe_buffers,
+                                   &ucontext->qp_reg_mem_list);
+                               spin_unlock_irqrestore(
+                                   &ucontext->qp_reg_mem_list_lock, flags);
+
+                               if (!iwqp->iwpbl) {
+                                       err_code = -ENODATA;
+                                       i40iw_pr_err("no pbl info\n");
+                                       goto error;
+                               }
+                       }
+               }
+               err_code = i40iw_setup_virt_qp(iwdev, iwqp, &init_info);
+       } else {
+               err_code = i40iw_setup_kmode_qp(iwdev, iwqp, &init_info);
+       }
+
+       if (err_code) {
+               i40iw_pr_err("setup qp failed\n");
+               goto error;
+       }
+
+       init_info.type = I40IW_QP_TYPE_IWARP;
+       ret = dev->iw_priv_qp_ops->qp_init(qp, &init_info);
+       if (ret) {
+               err_code = -EPROTO;
+               i40iw_pr_err("qp_init fail\n");
+               goto error;
+       }
+       ctx_info = &iwqp->ctx_info;
+       iwarp_info = &iwqp->iwarp_info;
+       iwarp_info->rd_enable = true;
+       iwarp_info->wr_rdresp_en = true;
+       if (!iwqp->user_mode)
+               iwarp_info->priv_mode_en = true;
+       iwarp_info->ddp_ver = 1;
+       iwarp_info->rdmap_ver = 1;
+
+       ctx_info->iwarp_info_valid = true;
+       ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
+       ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
+       if (qp->push_idx == I40IW_INVALID_PUSH_PAGE_INDEX) {
+               ctx_info->push_mode_en = false;
+       } else {
+               ctx_info->push_mode_en = true;
+               ctx_info->push_idx = qp->push_idx;
+       }
+
+       ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
+                                            (u64 *)iwqp->host_ctx.va,
+                                            ctx_info);
+       ctx_info->iwarp_info_valid = false;
+       cqp_request = i40iw_get_cqp_request(iwcqp, true);
+       if (!cqp_request) {
+               err_code = -ENOMEM;
+               goto error;
+       }
+       cqp_info = &cqp_request->info;
+       qp_info = &cqp_request->info.in.u.qp_create.info;
+
+       memset(qp_info, 0, sizeof(*qp_info));
+
+       qp_info->cq_num_valid = true;
+       qp_info->next_iwarp_state = I40IW_QP_STATE_IDLE;
+
+       cqp_info->cqp_cmd = OP_QP_CREATE;
+       cqp_info->post_sq = 1;
+       cqp_info->in.u.qp_create.qp = qp;
+       cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
+       ret = i40iw_handle_cqp_op(iwdev, cqp_request);
+       if (ret) {
+               i40iw_pr_err("CQP-OP QP create fail");
+               err_code = -EACCES;
+               goto error;
+       }
+
+       i40iw_add_ref(&iwqp->ibqp);
+       spin_lock_init(&iwqp->lock);
+       iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
+       iwdev->qp_table[qp_num] = iwqp;
+       i40iw_add_pdusecount(iwqp->iwpd);
+       if (ibpd->uobject && udata) {
+               memset(&uresp, 0, sizeof(uresp));
+               uresp.actual_sq_size = sq_size;
+               uresp.actual_rq_size = rq_size;
+               uresp.qp_id = qp_num;
+               uresp.push_idx = qp->push_idx;
+               err_code = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
+               if (err_code) {
+                       i40iw_pr_err("copy_to_udata failed\n");
+                       i40iw_destroy_qp(&iwqp->ibqp);
+                       /* let the completion of the qp destroy free the qp */
+                       return ERR_PTR(err_code);
+               }
+       }
+
+       return &iwqp->ibqp;
+error:
+       i40iw_free_qp_resources(iwdev, iwqp, qp_num);
+       kfree(mem);
+       return ERR_PTR(err_code);
+}
+
+/**
+ * i40iw_query_qp - query qp attributes
+ * @ibqp: qp pointer
+ * @attr: attributes pointer
+ * @attr_mask: Not used
+ * @init_attr: qp attributes to return
+ */
+static int i40iw_query_qp(struct ib_qp *ibqp,
+                         struct ib_qp_attr *attr,
+                         int attr_mask,
+                         struct ib_qp_init_attr *init_attr)
+{
+       struct i40iw_qp *iwqp = to_iwqp(ibqp);
+       struct i40iw_sc_qp *qp = &iwqp->sc_qp;
+
+       attr->qp_access_flags = 0;
+       attr->cap.max_send_wr = qp->qp_uk.sq_size;
+       attr->cap.max_recv_wr = qp->qp_uk.rq_size;
+       attr->cap.max_recv_sge = 1;
+       attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;
+       init_attr->event_handler = iwqp->ibqp.event_handler;
+       init_attr->qp_context = iwqp->ibqp.qp_context;
+       init_attr->send_cq = iwqp->ibqp.send_cq;
+       init_attr->recv_cq = iwqp->ibqp.recv_cq;
+       init_attr->srq = iwqp->ibqp.srq;
+       init_attr->cap = attr->cap;
+       return 0;
+}
+
+/**
+ * i40iw_hw_modify_qp - setup cqp for modify qp
+ * @iwdev: iwarp device
+ * @iwqp: qp ptr (user or kernel)
+ * @info: info for modify qp
+ * @wait: flag to wait or not for modify qp completion
+ */
+void i40iw_hw_modify_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp,
+                       struct i40iw_modify_qp_info *info, bool wait)
+{
+       enum i40iw_status_code status;
+       struct i40iw_cqp_request *cqp_request;
+       struct cqp_commands_info *cqp_info;
+       struct i40iw_modify_qp_info *m_info;
+
+       cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait);
+       if (!cqp_request)
+               return;
+
+       cqp_info = &cqp_request->info;
+       m_info = &cqp_info->in.u.qp_modify.info;
+       memcpy(m_info, info, sizeof(*m_info));
+       cqp_info->cqp_cmd = OP_QP_MODIFY;
+       cqp_info->post_sq = 1;
+       cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp;
+       cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request;
+       status = i40iw_handle_cqp_op(iwdev, cqp_request);
+       if (status)
+               i40iw_pr_err("CQP-OP Modify QP fail");
+}
+
+/**
+ * i40iw_modify_qp - modify qp request
+ * @ibqp: qp's pointer for modify
+ * @attr: access attributes
+ * @attr_mask: state mask
+ * @udata: user data
+ */
+int i40iw_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
+                   int attr_mask, struct ib_udata *udata)
+{
+       struct i40iw_qp *iwqp = to_iwqp(ibqp);
+       struct i40iw_device *iwdev = iwqp->iwdev;
+       struct i40iw_qp_host_ctx_info *ctx_info;
+       struct i40iwarp_offload_info *iwarp_info;
+       struct i40iw_modify_qp_info info;
+       u8 issue_modify_qp = 0;
+       u8 dont_wait = 0;
+       u32 err;
+       unsigned long flags;
+
+       memset(&info, 0, sizeof(info));
+       ctx_info = &iwqp->ctx_info;
+       iwarp_info = &iwqp->iwarp_info;
+
+       spin_lock_irqsave(&iwqp->lock, flags);
+
+       if (attr_mask & IB_QP_STATE) {
+               switch (attr->qp_state) {
+               case IB_QPS_INIT:
+               case IB_QPS_RTR:
+                       if (iwqp->iwarp_state > (u32)I40IW_QP_STATE_IDLE) {
+                               err = -EINVAL;
+                               goto exit;
+                       }
+                       if (iwqp->iwarp_state == I40IW_QP_STATE_INVALID) {
+                               info.next_iwarp_state = I40IW_QP_STATE_IDLE;
+                               issue_modify_qp = 1;
+                       }
+                       break;
+               case IB_QPS_RTS:
+                       if ((iwqp->iwarp_state > (u32)I40IW_QP_STATE_RTS) ||
+                           (!iwqp->cm_id)) {
+                               err = -EINVAL;
+                               goto exit;
+                       }
+
+                       issue_modify_qp = 1;
+                       iwqp->hw_tcp_state = I40IW_TCP_STATE_ESTABLISHED;
+                       iwqp->hte_added = 1;
+                       info.next_iwarp_state = I40IW_QP_STATE_RTS;
+                       info.tcp_ctx_valid = true;
+                       info.ord_valid = true;
+                       info.arp_cache_idx_valid = true;
+                       info.cq_num_valid = true;
+                       break;
+               case IB_QPS_SQD:
+                       if (iwqp->hw_iwarp_state > (u32)I40IW_QP_STATE_RTS) {
+                               err = 0;
+                               goto exit;
+                       }
+                       if ((iwqp->iwarp_state == (u32)I40IW_QP_STATE_CLOSING) ||
+                           (iwqp->iwarp_state < (u32)I40IW_QP_STATE_RTS)) {
+                               err = 0;
+                               goto exit;
+                       }
+                       if (iwqp->iwarp_state > (u32)I40IW_QP_STATE_CLOSING) {
+                               err = -EINVAL;
+                               goto exit;
+                       }
+                       info.next_iwarp_state = I40IW_QP_STATE_CLOSING;
+                       issue_modify_qp = 1;
+                       break;
+               case IB_QPS_SQE:
+                       if (iwqp->iwarp_state >= (u32)I40IW_QP_STATE_TERMINATE) {
+                               err = -EINVAL;
+                               goto exit;
+                       }
+                       info.next_iwarp_state = I40IW_QP_STATE_TERMINATE;
+                       issue_modify_qp = 1;
+                       break;
+               case IB_QPS_ERR:
+               case IB_QPS_RESET:
+                       if (iwqp->iwarp_state == (u32)I40IW_QP_STATE_ERROR) {
+                               err = -EINVAL;
+                               goto exit;
+                       }
+                       if (iwqp->sc_qp.term_flags)
+                               del_timer(&iwqp->terminate_timer);
+                       info.next_iwarp_state = I40IW_QP_STATE_ERROR;
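+                       /* reset the TCP connection only if it is still active (not closed or in TIME_WAIT) and the interface is up */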
+                       if ((iwqp->hw_tcp_state > I40IW_TCP_STATE_CLOSED) &&
+                           iwdev->iw_status &&
+                           (iwqp->hw_tcp_state != I40IW_TCP_STATE_TIME_WAIT))
+                               info.reset_tcp_conn = true;
+                       else
+                               dont_wait = 1;
+                       issue_modify_qp = 1;
+                       info.next_iwarp_state = I40IW_QP_STATE_ERROR;
+                       break;
+               default:
+                       err = -EINVAL;
+                       goto exit;
+               }
+
+               iwqp->ibqp_state = attr->qp_state;
+
+               if (issue_modify_qp)
+                       iwqp->iwarp_state = info.next_iwarp_state;
+               else
+                       info.next_iwarp_state = iwqp->iwarp_state;
+       }
+       if (attr_mask & IB_QP_ACCESS_FLAGS) {
+               ctx_info->iwarp_info_valid = true;
+               if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE)
+                       iwarp_info->wr_rdresp_en = true;
+               if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
+                       iwarp_info->wr_rdresp_en = true;
+               if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
+                       iwarp_info->rd_enable = true;
+               if (attr->qp_access_flags & IB_ACCESS_MW_BIND)
+                       iwarp_info->bind_en = true;
+
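+               /* user-mode QPs get read and write access and run without privileged mode */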
+               if (iwqp->user_mode) {
+                       iwarp_info->rd_enable = true;
+                       iwarp_info->wr_rdresp_en = true;
+                       iwarp_info->priv_mode_en = false;
+               }
+       }
+
+       if (ctx_info->iwarp_info_valid) {
+               struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+               int ret;
+
+               ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
+               ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
+               ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
+                                                    (u64 *)iwqp->host_ctx.va,
+                                                    ctx_info);
+               if (ret) {
+                       i40iw_pr_err("setting QP context\n");
+                       err = -EINVAL;
+                       goto exit;
+               }
+       }
+
+       spin_unlock_irqrestore(&iwqp->lock, flags);
+
+       if (issue_modify_qp)
+               i40iw_hw_modify_qp(iwdev, iwqp, &info, true);
+
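+       /* no TCP reset was issued; mark the connection closed and record a reset AE locally */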
+       if (issue_modify_qp && (iwqp->ibqp_state > IB_QPS_RTS)) {
+               if (dont_wait) {
+                       if (iwqp->cm_id && iwqp->hw_tcp_state) {
+                               spin_lock_irqsave(&iwqp->lock, flags);
+                               iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED;
+                               iwqp->last_aeq = I40IW_AE_RESET_SENT;
+                               spin_unlock_irqrestore(&iwqp->lock, flags);
+                       }
+               }
+       }
+       return 0;
+exit:
+       spin_unlock_irqrestore(&iwqp->lock, flags);
+       return err;
+}
+
+/**
+ * cq_free_resources - free up resources for cq
+ * @iwdev: iwarp device
+ * @iwcq: cq ptr
+ */
+static void cq_free_resources(struct i40iw_device *iwdev, struct i40iw_cq *iwcq)
+{
+       struct i40iw_sc_cq *cq = &iwcq->sc_cq;
+
+       if (!iwcq->user_mode)
+               i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwcq->kmem);
+       i40iw_free_resource(iwdev, iwdev->allocated_cqs, cq->cq_uk.cq_id);
+}
+
+/**
+ * cq_wq_destroy - send cq destroy cqp
+ * @iwdev: iwarp device
+ * @cq: hardware control cq
+ */
+static void cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq)
+{
+       enum i40iw_status_code status;
+       struct i40iw_cqp_request *cqp_request;
+       struct cqp_commands_info *cqp_info;
+
+       cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
+       if (!cqp_request)
+               return;
+
+       cqp_info = &cqp_request->info;
+
+       cqp_info->cqp_cmd = OP_CQ_DESTROY;
+       cqp_info->post_sq = 1;
+       cqp_info->in.u.cq_destroy.cq = cq;
+       cqp_info->in.u.cq_destroy.scratch = (uintptr_t)cqp_request;
+       status = i40iw_handle_cqp_op(iwdev, cqp_request);
+       if (status)
+               i40iw_pr_err("CQP-OP Destroy QP fail");
+}
+
+/**
+ * i40iw_destroy_cq - destroy cq
+ * @ib_cq: cq pointer
+ */
+static int i40iw_destroy_cq(struct ib_cq *ib_cq)
+{
+       struct i40iw_cq *iwcq;
+       struct i40iw_device *iwdev;
+       struct i40iw_sc_cq *cq;
+
+       if (!ib_cq) {
+               i40iw_pr_err("ib_cq == NULL\n");
+               return 0;
+       }
+
+       iwcq = to_iwcq(ib_cq);
+       iwdev = to_iwdev(ib_cq->device);
+       cq = &iwcq->sc_cq;
+       cq_wq_destroy(iwdev, cq);
+       cq_free_resources(iwdev, iwcq);
+       kfree(iwcq);
+       return 0;
+}
+
+/**
+ * i40iw_create_cq - create cq
+ * @ibdev: device pointer from stack
+ * @attr: attributes for cq
+ * @context: user context created during alloc
+ * @udata: user data
+ */
+static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
+                                    const struct ib_cq_init_attr *attr,
+                                    struct ib_ucontext *context,
+                                    struct ib_udata *udata)
+{
+       struct i40iw_device *iwdev = to_iwdev(ibdev);
+       struct i40iw_cq *iwcq;
+       struct i40iw_pbl *iwpbl;
+       u32 cq_num = 0;
+       struct i40iw_sc_cq *cq;
+       struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+       struct i40iw_cq_init_info info;
+       enum i40iw_status_code status;
+       struct i40iw_cqp_request *cqp_request;
+       struct cqp_commands_info *cqp_info;
+       struct i40iw_cq_uk_init_info *ukinfo = &info.cq_uk_init_info;
+       unsigned long flags;
+       int err_code;
+       int entries = attr->cqe;
+
+       if (entries > iwdev->max_cqe)
+               return ERR_PTR(-EINVAL);
+
+       iwcq = kzalloc(sizeof(*iwcq), GFP_KERNEL);
+       if (!iwcq)
+               return ERR_PTR(-ENOMEM);
+
+       memset(&info, 0, sizeof(info));
+
+       err_code = i40iw_alloc_resource(iwdev, iwdev->allocated_cqs,
+                                       iwdev->max_cq, &cq_num,
+                                       &iwdev->next_cq);
+       if (err_code)
+               goto error;
+
+       cq = &iwcq->sc_cq;
+       cq->back_cq = (void *)iwcq;
+       spin_lock_init(&iwcq->lock);
+
+       info.dev = dev;
+       ukinfo->cq_size = max(entries, 4);
+       ukinfo->cq_id = cq_num;
+       iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
+       info.ceqe_mask = 0;
+       info.ceq_id = 0;
+       info.ceq_id_valid = true;
+       info.ceqe_mask = 1;
+       info.type = I40IW_CQ_TYPE_IWARP;
+       if (context) {
+               struct i40iw_ucontext *ucontext;
+               struct i40iw_create_cq_req req;
+               struct i40iw_cq_mr *cqmr;
+
+               memset(&req, 0, sizeof(req));
+               iwcq->user_mode = true;
+               ucontext = to_ucontext(context);
+               if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req))) {
+                       err_code = -EFAULT;
+                       goto cq_free_resources;
+               }
+
+               spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
+               iwpbl = i40iw_get_pbl((unsigned long)req.user_cq_buffer,
+                                     &ucontext->cq_reg_mem_list);
+               spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
+               if (!iwpbl) {
+                       err_code = -EPROTO;
+                       goto cq_free_resources;
+               }
+
+               iwcq->iwpbl = iwpbl;
+               iwcq->cq_mem_size = 0;
+               cqmr = &iwpbl->cq_mr;
+               info.shadow_area_pa = cpu_to_le64(cqmr->shadow);
+               if (iwpbl->pbl_allocated) {
+                       info.virtual_map = true;
+                       info.pbl_chunk_size = 1;
+                       info.first_pm_pbl_idx = cqmr->cq_pbl.idx;
+               } else {
+                       info.cq_base_pa = cqmr->cq_pbl.addr;
+               }
+       } else {
+               /* Kmode allocations */
+               int rsize;
+               int shadow;
+
+               rsize = info.cq_uk_init_info.cq_size * sizeof(struct i40iw_cqe);
+               rsize = round_up(rsize, 256);
+               shadow = I40IW_SHADOW_AREA_SIZE << 3;
+               status = i40iw_allocate_dma_mem(dev->hw, &iwcq->kmem,
+                                               rsize + shadow, 256);
+               if (status) {
+                       err_code = -ENOMEM;
+                       goto cq_free_resources;
+               }
+               ukinfo->cq_base = iwcq->kmem.va;
+               info.cq_base_pa = iwcq->kmem.pa;
+               info.shadow_area_pa = info.cq_base_pa + rsize;
+               ukinfo->shadow_area = iwcq->kmem.va + rsize;
+       }
+
+       if (dev->iw_priv_cq_ops->cq_init(cq, &info)) {
+               i40iw_pr_err("init cq fail\n");
+               err_code = -EPROTO;
+               goto cq_free_resources;
+       }
+
+       cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
+       if (!cqp_request) {
+               err_code = -ENOMEM;
+               goto cq_free_resources;
+       }
+
+       cqp_info = &cqp_request->info;
+       cqp_info->cqp_cmd = OP_CQ_CREATE;
+       cqp_info->post_sq = 1;
+       cqp_info->in.u.cq_create.cq = cq;
+       cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request;
+       status = i40iw_handle_cqp_op(iwdev, cqp_request);
+       if (status) {
+               i40iw_pr_err("CQP-OP Create QP fail");
+               err_code = -EPROTO;
+               goto cq_free_resources;
+       }
+
+       if (context) {
+               struct i40iw_create_cq_resp resp;
+
+               memset(&resp, 0, sizeof(resp));
+               resp.cq_id = info.cq_uk_init_info.cq_id;
+               resp.cq_size = info.cq_uk_init_info.cq_size;
+               if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
+                       i40iw_pr_err("copy to user data\n");
+                       err_code = -EPROTO;
+                       goto cq_destroy;
+               }
+       }
+
+       return (struct ib_cq *)iwcq;
+
+cq_destroy:
+       cq_wq_destroy(iwdev, cq);
+cq_free_resources:
+       cq_free_resources(iwdev, iwcq);
+error:
+       kfree(iwcq);
+       return ERR_PTR(err_code);
+}
+
+/**
+ * i40iw_get_user_access - translate IB access flags to hw access flags
+ * @acc: IB access flags
+ */
+static inline u16 i40iw_get_user_access(int acc)
+{
+       u16 access = 0;
+
+       access |= (acc & IB_ACCESS_LOCAL_WRITE) ? I40IW_ACCESS_FLAGS_LOCALWRITE : 0;
+       access |= (acc & IB_ACCESS_REMOTE_WRITE) ? I40IW_ACCESS_FLAGS_REMOTEWRITE : 0;
+       access |= (acc & IB_ACCESS_REMOTE_READ) ? I40IW_ACCESS_FLAGS_REMOTEREAD : 0;
+       access |= (acc & IB_ACCESS_MW_BIND) ? I40IW_ACCESS_FLAGS_BIND_WINDOW : 0;
+       return access;
+}
+
+/**
+ * i40iw_free_stag - free stag resource
+ * @iwdev: iwarp device
+ * @stag: stag to free
+ */
+static void i40iw_free_stag(struct i40iw_device *iwdev, u32 stag)
+{
+       u32 stag_idx;
+
+       stag_idx = (stag & iwdev->mr_stagmask) >> I40IW_CQPSQ_STAG_IDX_SHIFT;
+       i40iw_free_resource(iwdev, iwdev->allocated_mrs, stag_idx);
+}
+
+/**
+ * i40iw_create_stag - create random stag
+ * @iwdev: iwarp device
+ */
+static u32 i40iw_create_stag(struct i40iw_device *iwdev)
+{
+       u32 stag = 0;
+       u32 stag_index = 0;
+       u32 next_stag_index;
+       u32 driver_key;
+       u32 random;
+       u8 consumer_key;
+       int ret;
+
+       get_random_bytes(&random, sizeof(random));
+       consumer_key = (u8)random;
+
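+       /* stag = driver key (bits outside mr_stagmask) | resource index in the stag index field | random 8-bit consumer key */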
+       driver_key = random & ~iwdev->mr_stagmask;
+       next_stag_index = (random & iwdev->mr_stagmask) >> 8;
+       next_stag_index %= iwdev->max_mr;
+
+       ret = i40iw_alloc_resource(iwdev,
+                                  iwdev->allocated_mrs, iwdev->max_mr,
+                                  &stag_index, &next_stag_index);
+       if (!ret) {
+               stag = stag_index << I40IW_CQPSQ_STAG_IDX_SHIFT;
+               stag |= driver_key;
+               stag += (u32)consumer_key;
+       }
+       return stag;
+}
+
+/**
+ * i40iw_next_pbl_addr - Get next pbl address
+ * @palloc: Pointer to allocated pbles
+ * @pbl: pointer to a pble
+ * @pinfo: info pointer
+ * @idx: index
+ */
+static inline u64 *i40iw_next_pbl_addr(struct i40iw_pble_alloc *palloc,
+                                      u64 *pbl,
+                                      struct i40iw_pble_info **pinfo,
+                                      u32 *idx)
+{
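+       /* step within the current pble chunk; when the chunk is exhausted, advance to the next pble_info and return its base address */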
+       *idx += 1;
+       if ((!(*pinfo)) || (*idx != (*pinfo)->cnt))
+               return ++pbl;
+       *idx = 0;
+       (*pinfo)++;
+       return (u64 *)(*pinfo)->addr;
+}
+
+/**
+ * i40iw_copy_user_pgaddrs - copy user page addresses into pbles
+ * @iwmr: iwmr for IB's user page addresses
+ * @pbl: pble pointer to save level 1 or level 0 pble
+ * @level: indicates level 0, 1 or 2
+ */
+static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr,
+                                   u64 *pbl,
+                                   enum i40iw_pble_level level)
+{
+       struct ib_umem *region = iwmr->region;
+       struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
+       int chunk_pages, entry, pg_shift, i;
+       struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
+       struct i40iw_pble_info *pinfo;
+       struct scatterlist *sg;
+       u32 idx = 0;
+
+       pinfo = (level == I40IW_LEVEL_1) ? NULL : palloc->level2.leaf;
+       pg_shift = ffs(region->page_size) - 1;
+       for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) {
+               chunk_pages = sg_dma_len(sg) >> pg_shift;
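+               /* remember the first page of a QP-type registration as the SQ page */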
+               if ((iwmr->type == IW_MEMREG_TYPE_QP) &&
+                   !iwpbl->qp_mr.sq_page)
+                       iwpbl->qp_mr.sq_page = sg_page(sg);
+               for (i = 0; i < chunk_pages; i++) {
+                       *pbl = cpu_to_le64(sg_dma_address(sg) + region->page_size * i);
+                       pbl = i40iw_next_pbl_addr(palloc, pbl, &pinfo, &idx);
+               }
+       }
+}
+
+/**
+ * i40iw_setup_pbles - copy user page addresses into pbles
+ * @iwdev: iwarp device
+ * @iwmr: mr pointer for this memory registration
+ * @use_pbles: flag to use pbles or in-MR memory (level 0)
+ */
+static int i40iw_setup_pbles(struct i40iw_device *iwdev,
+                            struct i40iw_mr *iwmr,
+                            bool use_pbles)
+{
+       struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
+       struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
+       struct i40iw_pble_info *pinfo;
+       u64 *pbl;
+       enum i40iw_status_code status;
+       enum i40iw_pble_level level = I40IW_LEVEL_1;
+
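+       /* without pbles, the page addresses must fit in the MR's pgaddrmem array */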
+       if (!use_pbles && (iwmr->page_cnt > MAX_SAVE_PAGE_ADDRS))
+               return -ENOMEM;
+
+       if (use_pbles) {
+               mutex_lock(&iwdev->pbl_mutex);
+               status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
+               mutex_unlock(&iwdev->pbl_mutex);
+               if (status)
+                       return -ENOMEM;
+
+               iwpbl->pbl_allocated = true;
+               level = palloc->level;
+               pinfo = (level == I40IW_LEVEL_1) ? &palloc->level1 : palloc->level2.leaf;
+               pbl = (u64 *)pinfo->addr;
+       } else {
+               pbl = iwmr->pgaddrmem;
+       }
+
+       i40iw_copy_user_pgaddrs(iwmr, pbl, level);
+       return 0;
+}
+
+/**
+ * i40iw_handle_q_mem - handle memory for qp and cq
+ * @iwdev: iwarp device
+ * @req: information for q memory management
+ * @iwpbl: pble struct
+ * @use_pbles: flag to use pble
+ */
+static int i40iw_handle_q_mem(struct i40iw_device *iwdev,
+                             struct i40iw_mem_reg_req *req,
+                             struct i40iw_pbl *iwpbl,
+                             bool use_pbles)
+{
+       struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
+       struct i40iw_mr *iwmr = iwpbl->iwmr;
+       struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr;
+       struct i40iw_cq_mr *cqmr = &iwpbl->cq_mr;
+       struct i40iw_hmc_pble *hmc_p;
+       u64 *arr = iwmr->pgaddrmem;
+       int err;
+       int total;
+
+       total = req->sq_pages + req->rq_pages + req->cq_pages;
+
+       err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
+       if (err)
+               return err;
+       if (use_pbles && (palloc->level != I40IW_LEVEL_1)) {
+               i40iw_free_pble(iwdev->pble_rsrc, palloc);
+               iwpbl->pbl_allocated = false;
+               return -ENOMEM;
+       }
+
+       if (use_pbles)
+               arr = (u64 *)palloc->level1.addr;
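+       /* the entry following the sq/rq/cq pages holds the shadow area */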
+       if (req->reg_type == IW_MEMREG_TYPE_QP) {
+               hmc_p = &qpmr->sq_pbl;
+               qpmr->shadow = (dma_addr_t)arr[total];
+               if (use_pbles) {
+                       hmc_p->idx = palloc->level1.idx;
+                       hmc_p = &qpmr->rq_pbl;
+                       hmc_p->idx = palloc->level1.idx + req->sq_pages;
+               } else {
+                       hmc_p->addr = arr[0];
+                       hmc_p = &qpmr->rq_pbl;
+                       hmc_p->addr = arr[1];
+               }
+       } else {                /* CQ */
+               hmc_p = &cqmr->cq_pbl;
+               cqmr->shadow = (dma_addr_t)arr[total];
+               if (use_pbles)
+                       hmc_p->idx = palloc->level1.idx;
+               else
+                       hmc_p->addr = arr[0];
+       }
+       return err;
+}
+
+/**
+ * i40iw_hwreg_mr - send cqp command for memory registration
+ * @iwdev: iwarp device
+ * @iwmr: iwarp mr pointer
+ * @access: access for MR
+ */
+static int i40iw_hwreg_mr(struct i40iw_device *iwdev,
+                         struct i40iw_mr *iwmr,
+                         u16 access)
+{
+       struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
+       struct i40iw_reg_ns_stag_info *stag_info;
+       struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
+       struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
+       enum i40iw_status_code status;
+       int err = 0;
+       struct i40iw_cqp_request *cqp_request;
+       struct cqp_commands_info *cqp_info;
+
+       cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
+       if (!cqp_request)
+               return -ENOMEM;
+
+       cqp_info = &cqp_request->info;
+       stag_info = &cqp_info->in.u.mr_reg_non_shared.info;
+       memset(stag_info, 0, sizeof(*stag_info));
+       stag_info->va = (void *)(unsigned long)iwpbl->user_base;
+       stag_info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT;
+       stag_info->stag_key = (u8)iwmr->stag;
+       stag_info->total_len = iwmr->length;
+       stag_info->access_rights = access;
+       stag_info->pd_id = iwpd->sc_pd.pd_id;
+       stag_info->addr_type = I40IW_ADDR_TYPE_VA_BASED;
+
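+       /* multi-page MRs reference the pble tree (level 1 leaf or level 2 root); single-page MRs register the physical address directly */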
+       if (iwmr->page_cnt > 1) {
+               if (palloc->level == I40IW_LEVEL_1) {
+                       stag_info->first_pm_pbl_index = palloc->level1.idx;
+                       stag_info->chunk_size = 1;
+               } else {
+                       stag_info->first_pm_pbl_index = palloc->level2.root.idx;
+                       stag_info->chunk_size = 3;
+               }
+       } else {
+               stag_info->reg_addr_pa = iwmr->pgaddrmem[0];
+       }
+
+       cqp_info->cqp_cmd = OP_MR_REG_NON_SHARED;
+       cqp_info->post_sq = 1;
+       cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->sc_dev;
+       cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request;
+
+       status = i40iw_handle_cqp_op(iwdev, cqp_request);
+       if (status) {
+               err = -ENOMEM;
+               i40iw_pr_err("CQP-OP MR Reg fail");
+       }
+       return err;
+}
+
+/**
+ * i40iw_reg_user_mr - Register a user memory region
+ * @pd: ptr of pd
+ * @start: virtual start address
+ * @length: length of mr
+ * @virt: virtual address
+ * @acc: access of mr
+ * @udata: user data
+ */
+static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
+                                      u64 start,
+                                      u64 length,
+                                      u64 virt,
+                                      int acc,
+                                      struct ib_udata *udata)
+{
+       struct i40iw_pd *iwpd = to_iwpd(pd);
+       struct i40iw_device *iwdev = to_iwdev(pd->device);
+       struct i40iw_ucontext *ucontext;
+       struct i40iw_pble_alloc *palloc;
+       struct i40iw_pbl *iwpbl;
+       struct i40iw_mr *iwmr;
+       struct ib_umem *region;
+       struct i40iw_mem_reg_req req;
+       u32 pbl_depth = 0;
+       u32 stag = 0;
+       u16 access;
+       u32 region_length;
+       bool use_pbles = false;
+       unsigned long flags;
+       int err = -ENOSYS;
+
+       region = ib_umem_get(pd->uobject->context, start, length, acc, 0);
+       if (IS_ERR(region))
+               return (struct ib_mr *)region;
+
+       if (ib_copy_from_udata(&req, udata, sizeof(req))) {
+               ib_umem_release(region);
+               return ERR_PTR(-EFAULT);
+       }
+
+       iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
+       if (!iwmr) {
+               ib_umem_release(region);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       iwpbl = &iwmr->iwpbl;
+       iwpbl->iwmr = iwmr;
+       iwmr->region = region;
+       iwmr->ibmr.pd = pd;
+       iwmr->ibmr.device = pd->device;
+       ucontext = to_ucontext(pd->uobject->context);
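+       /* number of 4K pages spanned by the region, including the offset into the first page */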
+       region_length = region->length + (start & 0xfff);
+       pbl_depth = region_length >> 12;
+       pbl_depth += (region_length & (4096 - 1)) ? 1 : 0;
+       iwmr->length = region->length;
+
+       iwpbl->user_base = virt;
+       palloc = &iwpbl->pble_alloc;
+
+       iwmr->type = req.reg_type;
+       iwmr->page_cnt = pbl_depth;
+
+       switch (req.reg_type) {
+       case IW_MEMREG_TYPE_QP:
+               use_pbles = ((req.sq_pages + req.rq_pages) > 2);
+               err = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
+               if (err)
+                       goto error;
+               spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
+               list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
+               spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
+               break;
+       case IW_MEMREG_TYPE_CQ:
+               use_pbles = (req.cq_pages > 1);
+               err = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
+               if (err)
+                       goto error;
+
+               spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
+               list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
+               spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
+               break;
+       case IW_MEMREG_TYPE_MEM:
+               access = I40IW_ACCESS_FLAGS_LOCALREAD;
+
+               use_pbles = (iwmr->page_cnt != 1);
+               err = i40iw_setup_pbles(iwdev, iwmr, use_pbles);
+               if (err)
+                       goto error;
+
+               access |= i40iw_get_user_access(acc);
+               stag = i40iw_create_stag(iwdev);
+               if (!stag) {
+                       err = -ENOMEM;
+                       goto error;
+               }
+
+               iwmr->stag = stag;
+               iwmr->ibmr.rkey = stag;
+               iwmr->ibmr.lkey = stag;
+
+               err = i40iw_hwreg_mr(iwdev, iwmr, access);
+               if (err) {
+                       i40iw_free_stag(iwdev, stag);
+                       goto error;
+               }
+               break;
+       default:
+               goto error;
+       }
+
+       iwmr->type = req.reg_type;
+       if (req.reg_type == IW_MEMREG_TYPE_MEM)
+               i40iw_add_pdusecount(iwpd);
+       return &iwmr->ibmr;
+
+error:
+       if (palloc->level != I40IW_LEVEL_0)
+               i40iw_free_pble(iwdev->pble_rsrc, palloc);
+       ib_umem_release(region);
+       kfree(iwmr);
+       return ERR_PTR(err);
+}
+
+/**
+ * i40iw_reg_phys_mr - register kernel physical memory
+ * @pd: ibpd pointer
+ * @addr: physical address of memory to register
+ * @size: size of memory to register
+ * @acc: Access rights
+ * @iova_start: start of virtual address for physical buffers
+ */
+struct ib_mr *i40iw_reg_phys_mr(struct ib_pd *pd,
+                               u64 addr,
+                               u64 size,
+                               int acc,
+                               u64 *iova_start)
+{
+       struct i40iw_pd *iwpd = to_iwpd(pd);
+       struct i40iw_device *iwdev = to_iwdev(pd->device);
+       struct i40iw_pbl *iwpbl;
+       struct i40iw_mr *iwmr;
+       enum i40iw_status_code status;
+       u32 stag;
+       u16 access = I40IW_ACCESS_FLAGS_LOCALREAD;
+       int ret;
+
+       iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
+       if (!iwmr)
+               return ERR_PTR(-ENOMEM);
+       iwmr->ibmr.pd = pd;
+       iwmr->ibmr.device = pd->device;
+       iwpbl = &iwmr->iwpbl;
+       iwpbl->iwmr = iwmr;
+       iwmr->type = IW_MEMREG_TYPE_MEM;
+       iwpbl->user_base = *iova_start;
+       stag = i40iw_create_stag(iwdev);
+       if (!stag) {
+               ret = -EOVERFLOW;
+               goto err;
+       }
+       access |= i40iw_get_user_access(acc);
+       iwmr->stag = stag;
+       iwmr->ibmr.rkey = stag;
+       iwmr->ibmr.lkey = stag;
+       iwmr->page_cnt = 1;
+       iwmr->pgaddrmem[0]  = addr;
+       status = i40iw_hwreg_mr(iwdev, iwmr, access);
+       if (status) {
+               i40iw_free_stag(iwdev, stag);
+               ret = -ENOMEM;
+               goto err;
+       }
+
+       i40iw_add_pdusecount(iwpd);
+       return &iwmr->ibmr;
+ err:
+       kfree(iwmr);
+       return ERR_PTR(ret);
+}
+
+/**
+ * i40iw_get_dma_mr - register physical mem
+ * @pd: ptr of pd
+ * @acc: access for memory
+ */
+static struct ib_mr *i40iw_get_dma_mr(struct ib_pd *pd, int acc)
+{
+       u64 kva = 0;
+
+       return i40iw_reg_phys_mr(pd, 0, 0xffffffffffULL, acc, &kva);
+}
+
+/**
+ * i40iw_del_memlist - delete pbl list entries for CQ/QP
+ * @iwmr: iwmr for IB's user page addresses
+ * @ucontext: ptr to user context
+ */
+static void i40iw_del_memlist(struct i40iw_mr *iwmr,
+                             struct i40iw_ucontext *ucontext)
+{
+       struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
+       unsigned long flags;
+
+       switch (iwmr->type) {
+       case IW_MEMREG_TYPE_CQ:
+               spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
+               if (!list_empty(&ucontext->cq_reg_mem_list))
+                       list_del(&iwpbl->list);
+               spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
+               break;
+       case IW_MEMREG_TYPE_QP:
+               spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
+               if (!list_empty(&ucontext->qp_reg_mem_list))
+                       list_del(&iwpbl->list);
+               spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
+               break;
+       default:
+               break;
+       }
+}
+
+/**
+ * i40iw_dereg_mr - deregister mr
+ * @ib_mr: mr ptr for dereg
+ */
+static int i40iw_dereg_mr(struct ib_mr *ib_mr)
+{
+       struct ib_pd *ibpd = ib_mr->pd;
+       struct i40iw_pd *iwpd = to_iwpd(ibpd);
+       struct i40iw_mr *iwmr = to_iwmr(ib_mr);
+       struct i40iw_device *iwdev = to_iwdev(ib_mr->device);
+       enum i40iw_status_code status;
+       struct i40iw_dealloc_stag_info *info;
+       struct i40iw_pbl *iwpbl = &iwmr->iwpbl;
+       struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc;
+       struct i40iw_cqp_request *cqp_request;
+       struct cqp_commands_info *cqp_info;
+       u32 stag_idx;
+
+       if (iwmr->region)
+               ib_umem_release(iwmr->region);
+
+       if (iwmr->type != IW_MEMREG_TYPE_MEM) {
+               if (ibpd->uobject) {
+                       struct i40iw_ucontext *ucontext;
+
+                       ucontext = to_ucontext(ibpd->uobject->context);
+                       i40iw_del_memlist(iwmr, ucontext);
+               }
+               if (iwpbl->pbl_allocated)
+                       i40iw_free_pble(iwdev->pble_rsrc, palloc);
+               kfree(iwpbl->iwmr);
+               iwpbl->iwmr = NULL;
+               return 0;
+       }
+
+       cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
+       if (!cqp_request)
+               return -ENOMEM;
+
+       cqp_info = &cqp_request->info;
+       info = &cqp_info->in.u.dealloc_stag.info;
+       memset(info, 0, sizeof(*info));
+
+       info->pd_id = cpu_to_le32(iwpd->sc_pd.pd_id & 0x00007fff);
+       info->stag_idx = RS_64_1(ib_mr->rkey, I40IW_CQPSQ_STAG_IDX_SHIFT);
+       stag_idx = info->stag_idx;
+       info->mr = true;
+       if (iwpbl->pbl_allocated)
+               info->dealloc_pbl = true;
+
+       cqp_info->cqp_cmd = OP_DEALLOC_STAG;
+       cqp_info->post_sq = 1;
+       cqp_info->in.u.dealloc_stag.dev = &iwdev->sc_dev;
+       cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
+       status = i40iw_handle_cqp_op(iwdev, cqp_request);
+       if (status)
+               i40iw_pr_err("CQP-OP dealloc failed for stag_idx = 0x%x\n", stag_idx);
+       i40iw_rem_pdusecount(iwpd, iwdev);
+       i40iw_free_stag(iwdev, iwmr->stag);
+       if (iwpbl->pbl_allocated)
+               i40iw_free_pble(iwdev->pble_rsrc, palloc);
+       kfree(iwmr);
+       return 0;
+}
+
+/**
+ * i40iw_show_rev - show hardware revision
+ */
+static ssize_t i40iw_show_rev(struct device *dev,
+                             struct device_attribute *attr, char *buf)
+{
+       struct i40iw_ib_device *iwibdev = container_of(dev,
+                                                      struct i40iw_ib_device,
+                                                      ibdev.dev);
+       u32 hw_rev = iwibdev->iwdev->sc_dev.hw_rev;
+
+       return sprintf(buf, "%x\n", hw_rev);
+}
+
+/**
+ * i40iw_show_fw_ver - show firmware version
+ */
+static ssize_t i40iw_show_fw_ver(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       u32 firmware_version = I40IW_FW_VERSION;
+
+       return sprintf(buf, "%u.%u\n", firmware_version,
+                      (firmware_version & 0x000000ff));
+}
+
+/**
+ * i40iw_show_hca - show HCA type
+ */
+static ssize_t i40iw_show_hca(struct device *dev,
+                             struct device_attribute *attr, char *buf)
+{
+       return sprintf(buf, "I40IW\n");
+}
+
+/**
+ * i40iw_show_board - show board ID
+ */
+static ssize_t i40iw_show_board(struct device *dev,
+                               struct device_attribute *attr,
+                               char *buf)
+{
+       return sprintf(buf, "%.*s\n", 32, "I40IW Board ID");
+}
+
+static DEVICE_ATTR(hw_rev, S_IRUGO, i40iw_show_rev, NULL);
+static DEVICE_ATTR(fw_ver, S_IRUGO, i40iw_show_fw_ver, NULL);
+static DEVICE_ATTR(hca_type, S_IRUGO, i40iw_show_hca, NULL);
+static DEVICE_ATTR(board_id, S_IRUGO, i40iw_show_board, NULL);
+
+static struct device_attribute *i40iw_dev_attributes[] = {
+       &dev_attr_hw_rev,
+       &dev_attr_fw_ver,
+       &dev_attr_hca_type,
+       &dev_attr_board_id
+};
+
+/**
+ * i40iw_copy_sg_list - copy sg list for qp
+ * @sg_list: destination i40iw sg list
+ * @sgl: source ib sg list to copy from
+ * @num_sges: count of sg entries
+ */
+static void i40iw_copy_sg_list(struct i40iw_sge *sg_list, struct ib_sge *sgl, int num_sges)
+{
+       unsigned int i;
+
+       for (i = 0; (i < num_sges) && (i < I40IW_MAX_WQ_FRAGMENT_COUNT); i++) {
+               sg_list[i].tag_off = sgl[i].addr;
+               sg_list[i].len = sgl[i].length;
+               sg_list[i].stag = sgl[i].lkey;
+       }
+}
+
+/**
+ * i40iw_post_send - post send wr for kernel application
+ * @ibqp: qp ptr for wr
+ * @ib_wr: work request ptr
+ * @bad_wr: return of bad wr if err
+ */
+static int i40iw_post_send(struct ib_qp *ibqp,
+                          struct ib_send_wr *ib_wr,
+                          struct ib_send_wr **bad_wr)
+{
+       struct i40iw_qp *iwqp;
+       struct i40iw_qp_uk *ukqp;
+       struct i40iw_post_sq_info info;
+       enum i40iw_status_code ret;
+       int err = 0;
+       unsigned long flags;
+
+       iwqp = (struct i40iw_qp *)ibqp;
+       ukqp = &iwqp->sc_qp.qp_uk;
+
+       spin_lock_irqsave(&iwqp->lock, flags);
+       while (ib_wr) {
+               memset(&info, 0, sizeof(info));
+               info.wr_id = (u64)(ib_wr->wr_id);
+               if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all)
+                       info.signaled = true;
+               if (ib_wr->send_flags & IB_SEND_FENCE)
+                       info.read_fence = true;
+
+               switch (ib_wr->opcode) {
+               case IB_WR_SEND:
+                       if (ib_wr->send_flags & IB_SEND_SOLICITED)
+                               info.op_type = I40IW_OP_TYPE_SEND_SOL;
+                       else
+                               info.op_type = I40IW_OP_TYPE_SEND;
+
+                       if (ib_wr->send_flags & IB_SEND_INLINE) {
+                               info.op.inline_send.data = (void *)(unsigned long)ib_wr->sg_list[0].addr;
+                               info.op.inline_send.len = ib_wr->sg_list[0].length;
+                               ret = ukqp->ops.iw_inline_send(ukqp, &info, rdma_wr(ib_wr)->rkey, false);
+                       } else {
+                               info.op.send.num_sges = ib_wr->num_sge;
+                               info.op.send.sg_list = (struct i40iw_sge *)ib_wr->sg_list;
+                               ret = ukqp->ops.iw_send(ukqp, &info, rdma_wr(ib_wr)->rkey, false);
+                       }
+
+                       if (ret)
+                               err = -EIO;
+                       break;
+               case IB_WR_RDMA_WRITE:
+                       info.op_type = I40IW_OP_TYPE_RDMA_WRITE;
+
+                       if (ib_wr->send_flags & IB_SEND_INLINE) {
+                               info.op.inline_rdma_write.data = (void *)(unsigned long)ib_wr->sg_list[0].addr;
+                               info.op.inline_rdma_write.len = ib_wr->sg_list[0].length;
+                               info.op.inline_rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
+                               info.op.inline_rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
+                               info.op.inline_rdma_write.rem_addr.len = ib_wr->sg_list->length;
+                               ret = ukqp->ops.iw_inline_rdma_write(ukqp, &info, false);
+                       } else {
+                               info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
+                               info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
+                               info.op.rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
+                               info.op.rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
+                               info.op.rdma_write.rem_addr.len = ib_wr->sg_list->length;
+                               ret = ukqp->ops.iw_rdma_write(ukqp, &info, false);
+                       }
+
+                       if (ret)
+                               err = -EIO;
+                       break;
+               case IB_WR_RDMA_READ:
+                       info.op_type = I40IW_OP_TYPE_RDMA_READ;
+                       info.op.rdma_read.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
+                       info.op.rdma_read.rem_addr.stag = rdma_wr(ib_wr)->rkey;
+                       info.op.rdma_read.rem_addr.len = ib_wr->sg_list->length;
+                       info.op.rdma_read.lo_addr.tag_off = ib_wr->sg_list->addr;
+                       info.op.rdma_read.lo_addr.stag = ib_wr->sg_list->lkey;
+                       info.op.rdma_read.lo_addr.len = ib_wr->sg_list->length;
+                       ret = ukqp->ops.iw_rdma_read(ukqp, &info, false, false);
+                       if (ret)
+                               err = -EIO;
+                       break;
+               default:
+                       err = -EINVAL;
+                       i40iw_pr_err(" upost_send bad opcode = 0x%x\n",
+                                    ib_wr->opcode);
+                       break;
+               }
+
+               if (err)
+                       break;
+               ib_wr = ib_wr->next;
+       }
+
+       if (err)
+               *bad_wr = ib_wr;
+       else
+               ukqp->ops.iw_qp_post_wr(ukqp);
+       spin_unlock_irqrestore(&iwqp->lock, flags);
+
+       return err;
+}
+
+/**
+ * i40iw_post_recv - post receive wr for kernel application
+ * @ibqp: ib qp pointer
+ * @ib_wr: work request for receive
+ * @bad_wr: bad wr caused an error
+ */
+static int i40iw_post_recv(struct ib_qp *ibqp,
+                          struct ib_recv_wr *ib_wr,
+                          struct ib_recv_wr **bad_wr)
+{
+       struct i40iw_qp *iwqp;
+       struct i40iw_qp_uk *ukqp;
+       struct i40iw_post_rq_info post_recv;
+       struct i40iw_sge sg_list[I40IW_MAX_WQ_FRAGMENT_COUNT];
+       enum i40iw_status_code ret = 0;
+       unsigned long flags;
+
+       iwqp = (struct i40iw_qp *)ibqp;
+       ukqp = &iwqp->sc_qp.qp_uk;
+
+       memset(&post_recv, 0, sizeof(post_recv));
+       spin_lock_irqsave(&iwqp->lock, flags);
+       while (ib_wr) {
+               post_recv.num_sges = ib_wr->num_sge;
+               post_recv.wr_id = ib_wr->wr_id;
+               i40iw_copy_sg_list(sg_list, ib_wr->sg_list, ib_wr->num_sge);
+               post_recv.sg_list = sg_list;
+               ret = ukqp->ops.iw_post_receive(ukqp, &post_recv);
+               if (ret) {
+                       i40iw_pr_err(" post_recv err %d\n", ret);
+                       *bad_wr = ib_wr;
+                       goto out;
+               }
+               ib_wr = ib_wr->next;
+       }
+ out:
+       spin_unlock_irqrestore(&iwqp->lock, flags);
+       if (ret)
+               return -ENOSYS;
+       return 0;
+}
+
+/**
+ * i40iw_poll_cq - poll cq for completion (kernel apps)
+ * @ibcq: cq to poll
+ * @num_entries: number of entries to poll
+ * @entry: wr of entry completed
+ */
+static int i40iw_poll_cq(struct ib_cq *ibcq,
+                        int num_entries,
+                        struct ib_wc *entry)
+{
+       struct i40iw_cq *iwcq;
+       int cqe_count = 0;
+       struct i40iw_cq_poll_info cq_poll_info;
+       enum i40iw_status_code ret;
+       struct i40iw_cq_uk *ukcq;
+       struct i40iw_sc_qp *qp;
+       unsigned long flags;
+
+       iwcq = (struct i40iw_cq *)ibcq;
+       ukcq = &iwcq->sc_cq.cq_uk;
+
+       spin_lock_irqsave(&iwcq->lock, flags);
+       while (cqe_count < num_entries) {
+               ret = ukcq->ops.iw_cq_poll_completion(ukcq, &cq_poll_info, true);
+               if (ret == I40IW_ERR_QUEUE_EMPTY) {
+                       break;
+               } else if (ret) {
+                       if (!cqe_count)
+                               cqe_count = -1;
+                       break;
+               }
+               entry->wc_flags = 0;
+               entry->wr_id = cq_poll_info.wr_id;
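+               /* all completion errors are reported to the ULP as flush errors */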
+               if (!cq_poll_info.error)
+                       entry->status = IB_WC_SUCCESS;
+               else
+                       entry->status = IB_WC_WR_FLUSH_ERR;
+
+               switch (cq_poll_info.op_type) {
+               case I40IW_OP_TYPE_RDMA_WRITE:
+                       entry->opcode = IB_WC_RDMA_WRITE;
+                       break;
+               case I40IW_OP_TYPE_RDMA_READ_INV_STAG:
+               case I40IW_OP_TYPE_RDMA_READ:
+                       entry->opcode = IB_WC_RDMA_READ;
+                       break;
+               case I40IW_OP_TYPE_SEND_SOL:
+               case I40IW_OP_TYPE_SEND_SOL_INV:
+               case I40IW_OP_TYPE_SEND_INV:
+               case I40IW_OP_TYPE_SEND:
+                       entry->opcode = IB_WC_SEND;
+                       break;
+               case I40IW_OP_TYPE_REC:
+                       entry->opcode = IB_WC_RECV;
+                       break;
+               default:
+                       entry->opcode = IB_WC_RECV;
+                       break;
+               }
+
+               entry->vendor_err =
+                   cq_poll_info.major_err << 16 | cq_poll_info.minor_err;
+               entry->ex.imm_data = 0;
+               qp = (struct i40iw_sc_qp *)cq_poll_info.qp_handle;
+               entry->qp = (struct ib_qp *)qp->back_qp;
+               entry->src_qp = cq_poll_info.qp_id;
+               entry->byte_len = cq_poll_info.bytes_xfered;
+               entry++;
+               cqe_count++;
+       }
+       spin_unlock_irqrestore(&iwcq->lock, flags);
+       return cqe_count;
+}
+
+/**
+ * i40iw_req_notify_cq - arm cq kernel application
+ * @ibcq: cq to arm
+ * @notify_flags: notification flags
+ */
+static int i40iw_req_notify_cq(struct ib_cq *ibcq,
+                              enum ib_cq_notify_flags notify_flags)
+{
+       struct i40iw_cq *iwcq;
+       struct i40iw_cq_uk *ukcq;
+       enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_SOLICITED;
+
+       iwcq = (struct i40iw_cq *)ibcq;
+       ukcq = &iwcq->sc_cq.cq_uk;
+       if (notify_flags == IB_CQ_NEXT_COMP)
+               cq_notify = IW_CQ_COMPL_EVENT;
+       ukcq->ops.iw_cq_request_notification(ukcq, cq_notify);
+       return 0;
+}
+
+/**
+ * i40iw_port_immutable - return port's immutable data
+ * @ibdev: ib dev struct
+ * @port_num: port number
+ * @immutable: immutable data for the port return
+ */
+static int i40iw_port_immutable(struct ib_device *ibdev, u8 port_num,
+                               struct ib_port_immutable *immutable)
+{
+       struct ib_port_attr attr;
+       int err;
+
+       err = i40iw_query_port(ibdev, port_num, &attr);
+
+       if (err)
+               return err;
+
+       immutable->pkey_tbl_len = attr.pkey_tbl_len;
+       immutable->gid_tbl_len = attr.gid_tbl_len;
+       immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
+
+       return 0;
+}
+
+/**
+ * i40iw_get_protocol_stats - Populates the rdma_stats structure
+ * @ibdev: ib dev struct
+ * @stats: iw protocol stats struct
+ */
+static int i40iw_get_protocol_stats(struct ib_device *ibdev,
+                                   union rdma_protocol_stats *stats)
+{
+       struct i40iw_device *iwdev = to_iwdev(ibdev);
+       struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+       struct i40iw_dev_pestat *devstat = &dev->dev_pestat;
+       struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;
+       struct timespec curr_time;
+       static struct timespec last_rd_time = {0, 0};
+       enum i40iw_status_code status = 0;
+       unsigned long flags;
+
+       curr_time = current_kernel_time();
+       memset(stats, 0, sizeof(*stats));
+
+       if (dev->is_pf) {
+               spin_lock_irqsave(&devstat->stats_lock, flags);
+               devstat->ops.iw_hw_stat_read_all(devstat,
+                       &devstat->hw_stats);
+               spin_unlock_irqrestore(&devstat->stats_lock, flags);
+       } else {
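+               /* a VF reads PE stats over the virtual channel at most once per second */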
+               if (((u64)curr_time.tv_sec - (u64)last_rd_time.tv_sec) > 1)
+                       status = i40iw_vchnl_vf_get_pe_stats(dev,
+                                                            &devstat->hw_stats);
+
+               if (status)
+                       return -ENOSYS;
+       }
+
+       stats->iw.ipInReceives = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] +
+                                hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6RXPKTS];
+       stats->iw.ipInTruncatedPkts = hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] +
+                                     hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC];
+       stats->iw.ipInDiscards = hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] +
+                                hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD];
+       stats->iw.ipOutNoRoutes = hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] +
+                                 hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE];
+       stats->iw.ipReasmReqds = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] +
+                                hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS];
+       stats->iw.ipFragCreates = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] +
+                                 hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS];
+       stats->iw.ipInMcastPkts = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] +
+                                 hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS];
+       stats->iw.ipOutMcastPkts = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] +
+                                  hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6TXMCPKTS];
+       stats->iw.tcpOutSegs = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_TCPTXSEG];
+       stats->iw.tcpInSegs = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_TCPRXSEGS];
+       stats->iw.tcpRetransSegs = hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_TCPRTXSEG];
+
+       last_rd_time = curr_time;
+       return 0;
+}
+
+/**
+ * i40iw_query_gid - Query port GID
+ * @ibdev: device pointer from stack
+ * @port: port number
+ * @index: Entry index
+ * @gid: Global ID
+ */
+static int i40iw_query_gid(struct ib_device *ibdev,
+                          u8 port,
+                          int index,
+                          union ib_gid *gid)
+{
+       struct i40iw_device *iwdev = to_iwdev(ibdev);
+
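+       /* iWARP exposes the netdev MAC address as the GID */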
+       memset(gid->raw, 0, sizeof(gid->raw));
+       ether_addr_copy(gid->raw, iwdev->netdev->dev_addr);
+       return 0;
+}
+
+/**
+ * i40iw_modify_port - modify port properties
+ * @ibdev: device pointer from stack
+ * @port: port number
+ * @port_modify_mask: mask for port modifications
+ * @props: port properties
+ */
+static int i40iw_modify_port(struct ib_device *ibdev,
+                            u8 port,
+                            int port_modify_mask,
+                            struct ib_port_modify *props)
+{
+       return 0;
+}
+
+/**
+ * i40iw_query_pkey - Query partition key
+ * @ibdev: device pointer from stack
+ * @port: port number
+ * @index: index of pkey
+ * @pkey: pointer to store the pkey
+ */
+static int i40iw_query_pkey(struct ib_device *ibdev,
+                           u8 port,
+                           u16 index,
+                           u16 *pkey)
+{
+       *pkey = 0;
+       return 0;
+}
+
+/**
+ * i40iw_create_ah - create address handle
+ * @ibpd: ptr of pd
+ * @attr: address handle attributes
+ */
+static struct ib_ah *i40iw_create_ah(struct ib_pd *ibpd,
+                                    struct ib_ah_attr *attr)
+{
+       return ERR_PTR(-ENOSYS);
+}
+
+/**
+ * i40iw_destroy_ah - Destroy address handle
+ * @ah: pointer to address handle
+ */
+static int i40iw_destroy_ah(struct ib_ah *ah)
+{
+       return -ENOSYS;
+}
+
+/**
+ * i40iw_init_rdma_device - initialization of iwarp device
+ * @iwdev: iwarp device
+ */
+static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev)
+{
+       struct i40iw_ib_device *iwibdev;
+       struct net_device *netdev = iwdev->netdev;
+       struct pci_dev *pcidev = (struct pci_dev *)iwdev->hw.dev_context;
+
+       iwibdev = (struct i40iw_ib_device *)ib_alloc_device(sizeof(*iwibdev));
+       if (!iwibdev) {
+               i40iw_pr_err("iwdev == NULL\n");
+               return NULL;
+       }
+       strlcpy(iwibdev->ibdev.name, "i40iw%d", IB_DEVICE_NAME_MAX);
+       iwibdev->ibdev.owner = THIS_MODULE;
+       iwdev->iwibdev = iwibdev;
+       iwibdev->iwdev = iwdev;
+
+       iwibdev->ibdev.node_type = RDMA_NODE_RNIC;
+       ether_addr_copy((u8 *)&iwibdev->ibdev.node_guid, netdev->dev_addr);
+
+       iwibdev->ibdev.uverbs_cmd_mask =
+           (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
+           (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
+           (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
+           (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
+           (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
+           (1ull << IB_USER_VERBS_CMD_REG_MR) |
+           (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
+           (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
+           (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
+           (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
+           (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
+           (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
+           (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
+           (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
+           (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
+           (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
+           (1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
+           (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
+           (1ull << IB_USER_VERBS_CMD_POST_RECV) |
+           (1ull << IB_USER_VERBS_CMD_POST_SEND);
+       iwibdev->ibdev.phys_port_cnt = 1;
+       iwibdev->ibdev.num_comp_vectors = 1;
+       iwibdev->ibdev.dma_device = &pcidev->dev;
+       iwibdev->ibdev.dev.parent = &pcidev->dev;
+       iwibdev->ibdev.query_port = i40iw_query_port;
+       iwibdev->ibdev.modify_port = i40iw_modify_port;
+       iwibdev->ibdev.query_pkey = i40iw_query_pkey;
+       iwibdev->ibdev.query_gid = i40iw_query_gid;
+       iwibdev->ibdev.alloc_ucontext = i40iw_alloc_ucontext;
+       iwibdev->ibdev.dealloc_ucontext = i40iw_dealloc_ucontext;
+       iwibdev->ibdev.mmap = i40iw_mmap;
+       iwibdev->ibdev.alloc_pd = i40iw_alloc_pd;
+       iwibdev->ibdev.dealloc_pd = i40iw_dealloc_pd;
+       iwibdev->ibdev.create_qp = i40iw_create_qp;
+       iwibdev->ibdev.modify_qp = i40iw_modify_qp;
+       iwibdev->ibdev.query_qp = i40iw_query_qp;
+       iwibdev->ibdev.destroy_qp = i40iw_destroy_qp;
+       iwibdev->ibdev.create_cq = i40iw_create_cq;
+       iwibdev->ibdev.destroy_cq = i40iw_destroy_cq;
+       iwibdev->ibdev.get_dma_mr = i40iw_get_dma_mr;
+       iwibdev->ibdev.reg_user_mr = i40iw_reg_user_mr;
+       iwibdev->ibdev.dereg_mr = i40iw_dereg_mr;
+       iwibdev->ibdev.get_protocol_stats = i40iw_get_protocol_stats;
+       iwibdev->ibdev.query_device = i40iw_query_device;
+       iwibdev->ibdev.create_ah = i40iw_create_ah;
+       iwibdev->ibdev.destroy_ah = i40iw_destroy_ah;
+       iwibdev->ibdev.iwcm = kzalloc(sizeof(*iwibdev->ibdev.iwcm), GFP_KERNEL);
+       if (!iwibdev->ibdev.iwcm) {
+               ib_dealloc_device(&iwibdev->ibdev);
+               i40iw_pr_err("iwcm == NULL\n");
+               return NULL;
+       }
+
+       iwibdev->ibdev.iwcm->add_ref = i40iw_add_ref;
+       iwibdev->ibdev.iwcm->rem_ref = i40iw_rem_ref;
+       iwibdev->ibdev.iwcm->get_qp = i40iw_get_qp;
+       iwibdev->ibdev.iwcm->connect = i40iw_connect;
+       iwibdev->ibdev.iwcm->accept = i40iw_accept;
+       iwibdev->ibdev.iwcm->reject = i40iw_reject;
+       iwibdev->ibdev.iwcm->create_listen = i40iw_create_listen;
+       iwibdev->ibdev.iwcm->destroy_listen = i40iw_destroy_listen;
+       memcpy(iwibdev->ibdev.iwcm->ifname, netdev->name,
+              sizeof(iwibdev->ibdev.iwcm->ifname));
+       iwibdev->ibdev.get_port_immutable   = i40iw_port_immutable;
+       iwibdev->ibdev.poll_cq = i40iw_poll_cq;
+       iwibdev->ibdev.req_notify_cq = i40iw_req_notify_cq;
+       iwibdev->ibdev.post_send = i40iw_post_send;
+       iwibdev->ibdev.post_recv = i40iw_post_recv;
+
+       return iwibdev;
+}
+
+/**
+ * i40iw_port_ibevent - indicate port event
+ * @iwdev: iwarp device
+ */
+void i40iw_port_ibevent(struct i40iw_device *iwdev)
+{
+       struct i40iw_ib_device *iwibdev = iwdev->iwibdev;
+       struct ib_event event;
+
+       event.device = &iwibdev->ibdev;
+       event.element.port_num = 1;
+       event.event = iwdev->iw_status ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
+       ib_dispatch_event(&event);
+}
+
+/**
+ * i40iw_unregister_rdma_device - unregister of iwarp from IB
+ * @iwibdev: rdma device ptr
+ */
+static void i40iw_unregister_rdma_device(struct i40iw_ib_device *iwibdev)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(i40iw_dev_attributes); ++i)
+               device_remove_file(&iwibdev->ibdev.dev,
+                                  i40iw_dev_attributes[i]);
+       ib_unregister_device(&iwibdev->ibdev);
+}
+
+/**
+ * i40iw_destroy_rdma_device - destroy rdma device and free resources
+ * @iwibdev: IB device ptr
+ */
+void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev)
+{
+       if (!iwibdev)
+               return;
+
+       i40iw_unregister_rdma_device(iwibdev);
+       kfree(iwibdev->ibdev.iwcm);
+       iwibdev->ibdev.iwcm = NULL;
+       ib_dealloc_device(&iwibdev->ibdev);
+}
+
+/**
+ * i40iw_register_rdma_device - register iwarp device to IB
+ * @iwdev: iwarp device
+ */
+int i40iw_register_rdma_device(struct i40iw_device *iwdev)
+{
+       int i, ret;
+       struct i40iw_ib_device *iwibdev;
+
+       iwdev->iwibdev = i40iw_init_rdma_device(iwdev);
+       if (!iwdev->iwibdev)
+               return -ENOSYS;
+       iwibdev = iwdev->iwibdev;
+
+       ret = ib_register_device(&iwibdev->ibdev, NULL);
+       if (ret)
+               goto error;
+
+       for (i = 0; i < ARRAY_SIZE(i40iw_dev_attributes); ++i) {
+               ret =
+                   device_create_file(&iwibdev->ibdev.dev,
+                                      i40iw_dev_attributes[i]);
+               if (ret) {
+                       while (i > 0) {
+                               i--;
+                               device_remove_file(&iwibdev->ibdev.dev, i40iw_dev_attributes[i]);
+                       }
+                       ib_unregister_device(&iwibdev->ibdev);
+                       goto error;
+               }
+       }
+       return 0;
+error:
+       kfree(iwdev->iwibdev->ibdev.iwcm);
+       iwdev->iwibdev->ibdev.iwcm = NULL;
+       ib_dealloc_device(&iwdev->iwibdev->ibdev);
+       return -ENOSYS;
+}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.h b/drivers/infiniband/hw/i40iw/i40iw_verbs.h
new file mode 100644 (file)
index 0000000..1101f77
--- /dev/null
@@ -0,0 +1,173 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses.  You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+*   Redistribution and use in source and binary forms, with or
+*   without modification, are permitted provided that the following
+*   conditions are met:
+*
+*    - Redistributions of source code must retain the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer.
+*
+*    - Redistributions in binary form must reproduce the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer in the documentation and/or other materials
+*      provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_VERBS_H
+#define I40IW_VERBS_H
+
+struct i40iw_ucontext {
+       struct ib_ucontext ibucontext;
+       struct i40iw_device *iwdev;
+       struct list_head cq_reg_mem_list;
+       spinlock_t cq_reg_mem_list_lock; /* memory list for cq's */
+       struct list_head qp_reg_mem_list;
+       spinlock_t qp_reg_mem_list_lock; /* memory list for qp's */
+};
+
+struct i40iw_pd {
+       struct ib_pd ibpd;
+       struct i40iw_sc_pd sc_pd;
+       atomic_t usecount;
+};
+
+struct i40iw_hmc_pble {
+       union {
+               u32 idx;
+               dma_addr_t addr;
+       };
+};
+
+struct i40iw_cq_mr {
+       struct i40iw_hmc_pble cq_pbl;
+       dma_addr_t shadow;
+};
+
+struct i40iw_qp_mr {
+       struct i40iw_hmc_pble sq_pbl;
+       struct i40iw_hmc_pble rq_pbl;
+       dma_addr_t shadow;
+       struct page *sq_page;
+};
+
+struct i40iw_pbl {
+       struct list_head list;
+       union {
+               struct i40iw_qp_mr qp_mr;
+               struct i40iw_cq_mr cq_mr;
+       };
+
+       bool pbl_allocated;
+       u64 user_base;
+       struct i40iw_pble_alloc pble_alloc;
+       struct i40iw_mr *iwmr;
+};
+
+#define MAX_SAVE_PAGE_ADDRS     4
+struct i40iw_mr {
+       union {
+               struct ib_mr ibmr;
+               struct ib_mw ibmw;
+               struct ib_fmr ibfmr;
+       };
+       struct ib_umem *region;
+       u16 type;
+       u32 page_cnt;
+       u32 stag;
+       u64 length;
+       u64 pgaddrmem[MAX_SAVE_PAGE_ADDRS];
+       struct i40iw_pbl iwpbl;
+};
+
+struct i40iw_cq {
+       struct ib_cq ibcq;
+       struct i40iw_sc_cq sc_cq;
+       u16 cq_head;
+       u16 cq_size;
+       u16 cq_number;
+       bool user_mode;
+       u32 polled_completions;
+       u32 cq_mem_size;
+       struct i40iw_dma_mem kmem;
+       spinlock_t lock; /* for poll cq */
+       struct i40iw_pbl *iwpbl;
+};
+
+struct disconn_work {
+       struct work_struct work;
+       struct i40iw_qp *iwqp;
+};
+
+struct iw_cm_id;
+struct ietf_mpa_frame;
+struct i40iw_ud_file;
+
+struct i40iw_qp_kmode {
+       struct i40iw_dma_mem dma_mem;
+       u64 *wrid_mem;
+};
+
+struct i40iw_qp {
+       struct ib_qp ibqp;
+       struct i40iw_sc_qp sc_qp;
+       struct i40iw_device *iwdev;
+       struct i40iw_cq *iwscq;
+       struct i40iw_cq *iwrcq;
+       struct i40iw_pd *iwpd;
+       struct i40iw_qp_host_ctx_info ctx_info;
+       struct i40iwarp_offload_info iwarp_info;
+       void *allocated_buffer;
+       atomic_t refcount;
+       struct iw_cm_id *cm_id;
+       void *cm_node;
+       struct ib_mr *lsmm_mr;
+       struct work_struct work;
+       enum ib_qp_state ibqp_state;
+       u32 iwarp_state;
+       u32 qp_mem_size;
+       u32 last_aeq;
+       atomic_t close_timer_started;
+       spinlock_t lock; /* for post work requests */
+       struct i40iw_qp_context *iwqp_context;
+       void *pbl_vbase;
+       dma_addr_t pbl_pbase;
+       struct page *page;
+       u8 active_conn:1;
+       u8 user_mode:1;
+       u8 hte_added:1;
+       u8 flush_issued:1;
+       u8 destroyed:1;
+       u8 sig_all:1;
+       u8 pau_mode:1;
+       u8 rsvd:1;
+       u16 term_sq_flush_code;
+       u16 term_rq_flush_code;
+       u8 hw_iwarp_state;
+       u8 hw_tcp_state;
+       struct i40iw_qp_kmode kqp;
+       struct i40iw_dma_mem host_ctx;
+       struct timer_list terminate_timer;
+       struct i40iw_pbl *iwpbl;
+       struct i40iw_dma_mem q2_ctx_mem;
+       struct i40iw_dma_mem ietf_mem;
+};
+#endif
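
Each wrapper struct in this header embeds its ib_* counterpart as a member, which lets verbs callbacks recover the driver-private object with container_of(). A sketch of the usual conversion helpers (the names to_iwqp/to_iwcq are illustrative, not taken from this patch):

static inline struct i40iw_qp *to_iwqp(struct ib_qp *ibqp)
{
        return container_of(ibqp, struct i40iw_qp, ibqp);
}

static inline struct i40iw_cq *to_iwcq(struct ib_cq *ibcq)
{
        return container_of(ibcq, struct i40iw_cq, ibcq);
}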
diff --git a/drivers/infiniband/hw/i40iw/i40iw_vf.c b/drivers/infiniband/hw/i40iw/i40iw_vf.c
new file mode 100644 (file)
index 0000000..cb0f183
--- /dev/null
@@ -0,0 +1,85 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses.  You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+*   Redistribution and use in source and binary forms, with or
+*   without modification, are permitted provided that the following
+*   conditions are met:
+*
+*    - Redistributions of source code must retain the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer.
+*
+*    - Redistributions in binary form must reproduce the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer in the documentation and/or other materials
+*      provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#include "i40iw_osdep.h"
+#include "i40iw_register.h"
+#include "i40iw_status.h"
+#include "i40iw_hmc.h"
+#include "i40iw_d.h"
+#include "i40iw_type.h"
+#include "i40iw_p.h"
+#include "i40iw_vf.h"
+
+/**
+ * i40iw_manage_vf_pble_bp - manage vf pble backing pages
+ * @cqp: cqp struct whose sq receives the wqe
+ * @info: pble info
+ * @scratch: opaque value returned with the cqp completion
+ * @post_sq: true to post the wqe and ring the doorbell
+ */
+enum i40iw_status_code i40iw_manage_vf_pble_bp(struct i40iw_sc_cqp *cqp,
+                                              struct i40iw_manage_vf_pble_info *info,
+                                              u64 scratch,
+                                              bool post_sq)
+{
+       u64 *wqe;
+       u64 temp, header, pd_pl_pba = 0;
+
+       wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+       if (!wqe)
+               return I40IW_ERR_RING_FULL;
+
+       temp = LS_64(info->pd_entry_cnt, I40IW_CQPSQ_MVPBP_PD_ENTRY_CNT) |
+           LS_64(info->first_pd_index, I40IW_CQPSQ_MVPBP_FIRST_PD_INX) |
+           LS_64(info->sd_index, I40IW_CQPSQ_MVPBP_SD_INX);
+       set_64bit_val(wqe, 16, temp);
+
+       header = LS_64((info->inv_pd_ent ? 1 : 0), I40IW_CQPSQ_MVPBP_INV_PD_ENT) |
+           LS_64(I40IW_CQP_OP_MANAGE_VF_PBLE_BP, I40IW_CQPSQ_OPCODE) |
+           LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+       set_64bit_val(wqe, 24, header);
+
+       pd_pl_pba = LS_64(info->pd_pl_pba >> 3, I40IW_CQPSQ_MVPBP_PD_PLPBA);
+       set_64bit_val(wqe, 32, pd_pl_pba);
+
+       i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE VF_PBLE_BP WQE", wqe, I40IW_CQP_WQE_SIZE * 8);
+
+       if (post_sq)
+               i40iw_sc_cqp_post_sq(cqp);
+       return 0;
+}
+
+struct i40iw_vf_cqp_ops iw_vf_cqp_ops = {
+       i40iw_manage_vf_pble_bp
+};
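
The exported single-entry ops table is the hook through which the rest of the driver posts this CQP operation. A hedged sketch of a caller (example_post_vf_pble is hypothetical; the real call sites live elsewhere in the driver):

static enum i40iw_status_code example_post_vf_pble(struct i40iw_sc_cqp *cqp,
                                                   u32 sd_index, u16 first_pd,
                                                   u16 pd_cnt, u64 pd_pl_pba,
                                                   u64 scratch)
{
        struct i40iw_manage_vf_pble_info info = {
                .sd_index = sd_index,
                .first_pd_index = first_pd,
                .pd_entry_cnt = pd_cnt,
                .inv_pd_ent = 0,
                .pd_pl_pba = pd_pl_pba,
        };

        /* post_sq = true: write the WQE and ring the CQP doorbell */
        return iw_vf_cqp_ops.manage_vf_pble_bp(cqp, &info, scratch, true);
}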
diff --git a/drivers/infiniband/hw/i40iw/i40iw_vf.h b/drivers/infiniband/hw/i40iw/i40iw_vf.h
new file mode 100644 (file)
index 0000000..f649f3a
--- /dev/null
@@ -0,0 +1,62 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses.  You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+*   Redistribution and use in source and binary forms, with or
+*   without modification, are permitted provided that the following
+*   conditions are met:
+*
+*    - Redistributions of source code must retain the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer.
+*
+*    - Redistributions in binary form must reproduce the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer in the documentation and/or other materials
+*      provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_VF_H
+#define I40IW_VF_H
+
+struct i40iw_sc_cqp;
+
+struct i40iw_manage_vf_pble_info {
+       u32 sd_index;
+       u16 first_pd_index;
+       u16 pd_entry_cnt;
+       u8 inv_pd_ent;
+       u64 pd_pl_pba;
+};
+
+struct i40iw_vf_cqp_ops {
+       enum i40iw_status_code (*manage_vf_pble_bp)(struct i40iw_sc_cqp *,
+                                                   struct i40iw_manage_vf_pble_info *,
+                                                   u64,
+                                                   bool);
+};
+
+enum i40iw_status_code i40iw_manage_vf_pble_bp(struct i40iw_sc_cqp *cqp,
+                                              struct i40iw_manage_vf_pble_info *info,
+                                              u64 scratch,
+                                              bool post_sq);
+
+extern struct i40iw_vf_cqp_ops iw_vf_cqp_ops;
+
+#endif
diff --git a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c
new file mode 100644 (file)
index 0000000..6b68f78
--- /dev/null
@@ -0,0 +1,748 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses.  You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+*   Redistribution and use in source and binary forms, with or
+*   without modification, are permitted provided that the following
+*   conditions are met:
+*
+*    - Redistributions of source code must retain the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer.
+*
+*    - Redistributions in binary form must reproduce the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer in the documentation and/or other materials
+*      provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#include "i40iw_osdep.h"
+#include "i40iw_register.h"
+#include "i40iw_status.h"
+#include "i40iw_hmc.h"
+#include "i40iw_d.h"
+#include "i40iw_type.h"
+#include "i40iw_p.h"
+#include "i40iw_virtchnl.h"
+
+/**
+ * vchnl_vf_send_get_ver_req - Request Channel version
+ * @dev: IWARP device pointer
+ * @vchnl_req: Virtual channel message request pointer
+ */
+static enum i40iw_status_code vchnl_vf_send_get_ver_req(struct i40iw_sc_dev *dev,
+                                                       struct i40iw_virtchnl_req *vchnl_req)
+{
+       enum i40iw_status_code ret_code = I40IW_ERR_NOT_READY;
+       struct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg;
+
+       if (!dev->vchnl_up)
+               return ret_code;
+
+       memset(vchnl_msg, 0, sizeof(*vchnl_msg));
+       vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req;
+       vchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg);
+       vchnl_msg->iw_op_code = I40IW_VCHNL_OP_GET_VER;
+       vchnl_msg->iw_op_ver = I40IW_VCHNL_OP_GET_VER_V0;
+       ret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len);
+       if (ret_code)
+               i40iw_debug(dev, I40IW_DEBUG_VIRT,
+                           "%s: virt channel send failed 0x%x\n", __func__, ret_code);
+       return ret_code;
+}
+
+/**
+ * vchnl_vf_send_get_hmc_fcn_req - Request HMC Function from VF
+ * @dev: IWARP device pointer
+ * @vchnl_req: Virtual channel message request pointer
+ */
+static enum i40iw_status_code vchnl_vf_send_get_hmc_fcn_req(struct i40iw_sc_dev *dev,
+                                                           struct i40iw_virtchnl_req *vchnl_req)
+{
+       enum i40iw_status_code ret_code = I40IW_ERR_NOT_READY;
+       struct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg;
+
+       if (!dev->vchnl_up)
+               return ret_code;
+
+       memset(vchnl_msg, 0, sizeof(*vchnl_msg));
+       vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req;
+       vchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg);
+       vchnl_msg->iw_op_code = I40IW_VCHNL_OP_GET_HMC_FCN;
+       vchnl_msg->iw_op_ver = I40IW_VCHNL_OP_GET_HMC_FCN_V0;
+       ret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len);
+       if (ret_code)
+               i40iw_debug(dev, I40IW_DEBUG_VIRT,
+                           "%s: virt channel send failed 0x%x\n", __func__, ret_code);
+       return ret_code;
+}
+
+/**
+ * vchnl_vf_send_get_pe_stats_req - Request PE stats from VF
+ * @dev: IWARP device pointer
+ * @vchnl_req: Virtual channel message request pointer
+ */
+static enum i40iw_status_code vchnl_vf_send_get_pe_stats_req(struct i40iw_sc_dev *dev,
+                                                            struct i40iw_virtchnl_req  *vchnl_req)
+{
+       enum i40iw_status_code ret_code = I40IW_ERR_NOT_READY;
+       struct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg;
+
+       if (!dev->vchnl_up)
+               return ret_code;
+
+       memset(vchnl_msg, 0, sizeof(*vchnl_msg));
+       vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req;
+       vchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg) + sizeof(struct i40iw_dev_hw_stats) - 1;
+       vchnl_msg->iw_op_code = I40IW_VCHNL_OP_GET_STATS;
+       vchnl_msg->iw_op_ver = I40IW_VCHNL_OP_GET_STATS_V0;
+       ret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len);
+       if (ret_code)
+               i40iw_debug(dev, I40IW_DEBUG_VIRT,
+                           "%s: virt channel send failed 0x%x\n", __func__, ret_code);
+       return ret_code;
+}
+
+/**
+ * vchnl_vf_send_add_hmc_objs_req - Add HMC objects
+ * @dev: IWARP device pointer
+ * @vchnl_req: Virtual channel message request pointer
+ * @rsrc_type: HMC resource type to add
+ * @start_index: starting index for the resource
+ * @rsrc_count: number of resources to add
+ */
+static enum i40iw_status_code vchnl_vf_send_add_hmc_objs_req(struct i40iw_sc_dev *dev,
+                                                            struct i40iw_virtchnl_req *vchnl_req,
+                                                            enum i40iw_hmc_rsrc_type rsrc_type,
+                                                            u32 start_index,
+                                                            u32 rsrc_count)
+{
+       enum i40iw_status_code ret_code = I40IW_ERR_NOT_READY;
+       struct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg;
+       struct i40iw_virtchnl_hmc_obj_range *add_hmc_obj;
+
+       if (!dev->vchnl_up)
+               return ret_code;
+
+       add_hmc_obj = (struct i40iw_virtchnl_hmc_obj_range *)vchnl_msg->iw_chnl_buf;
+       memset(vchnl_msg, 0, sizeof(*vchnl_msg));
+       memset(add_hmc_obj, 0, sizeof(*add_hmc_obj));
+       vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req;
+       vchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg) + sizeof(struct i40iw_virtchnl_hmc_obj_range) - 1;
+       vchnl_msg->iw_op_code = I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE;
+       vchnl_msg->iw_op_ver = I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE_V0;
+       add_hmc_obj->obj_type = (u16)rsrc_type;
+       add_hmc_obj->start_index = start_index;
+       add_hmc_obj->obj_count = rsrc_count;
+       ret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len);
+       if (ret_code)
+               i40iw_debug(dev, I40IW_DEBUG_VIRT,
+                           "%s: virt channel send failed 0x%x\n", __func__, ret_code);
+       return ret_code;
+}
+
+/**
+ * vchnl_vf_send_del_hmc_objs_req - del HMC objects
+ * @dev: IWARP device pointer
+ * @vchnl_req: Virtual channel message request pointer
+ * @rsrc_type: HMC resource type to delete
+ * @start_index: starting index for the resource
+ * @rsrc_count: number of resources to delete
+ */
+static enum i40iw_status_code vchnl_vf_send_del_hmc_objs_req(struct i40iw_sc_dev *dev,
+                                                            struct i40iw_virtchnl_req *vchnl_req,
+                                                            enum i40iw_hmc_rsrc_type rsrc_type,
+                                                            u32 start_index,
+                                                            u32 rsrc_count)
+{
+       enum i40iw_status_code ret_code = I40IW_ERR_NOT_READY;
+       struct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg;
+       struct i40iw_virtchnl_hmc_obj_range *add_hmc_obj;
+
+       if (!dev->vchnl_up)
+               return ret_code;
+
+       add_hmc_obj = (struct i40iw_virtchnl_hmc_obj_range *)vchnl_msg->iw_chnl_buf;
+       memset(vchnl_msg, 0, sizeof(*vchnl_msg));
+       memset(add_hmc_obj, 0, sizeof(*add_hmc_obj));
+       vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req;
+       vchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg) + sizeof(struct i40iw_virtchnl_hmc_obj_range) - 1;
+       vchnl_msg->iw_op_code = I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE;
+       vchnl_msg->iw_op_ver = I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE_V0;
+       add_hmc_obj->obj_type = (u16)rsrc_type;
+       add_hmc_obj->start_index = start_index;
+       add_hmc_obj->obj_count = rsrc_count;
+       ret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len);
+       if (ret_code)
+               i40iw_debug(dev, I40IW_DEBUG_VIRT,
+                           "%s: virt channel send failed 0x%x\n", __func__, ret_code);
+       return ret_code;
+}
+
+/**
+ * vchnl_pf_send_get_ver_resp - Send channel version to VF
+ * @dev: IWARP device pointer
+ * @vf_id: Virtual function ID associated with the message
+ * @vchnl_msg: Virtual channel message buffer pointer
+ */
+static void vchnl_pf_send_get_ver_resp(struct i40iw_sc_dev *dev,
+                                      u32 vf_id,
+                                      struct i40iw_virtchnl_op_buf *vchnl_msg)
+{
+       enum i40iw_status_code ret_code;
+       u8 resp_buffer[sizeof(struct i40iw_virtchnl_resp_buf) + sizeof(u32) - 1];
+       struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)resp_buffer;
+
+       memset(resp_buffer, 0, sizeof(resp_buffer));
+       vchnl_msg_resp->iw_chnl_op_ctx = vchnl_msg->iw_chnl_op_ctx;
+       vchnl_msg_resp->iw_chnl_buf_len = sizeof(resp_buffer);
+       vchnl_msg_resp->iw_op_ret_code = I40IW_SUCCESS;
+       *((u32 *)vchnl_msg_resp->iw_chnl_buf) = I40IW_VCHNL_CHNL_VER_V0;
+       ret_code = dev->vchnl_if.vchnl_send(dev, vf_id, resp_buffer, sizeof(resp_buffer));
+       if (ret_code)
+               i40iw_debug(dev, I40IW_DEBUG_VIRT,
+                           "%s: virt channel send failed 0x%x\n", __func__, ret_code);
+}
+
+/**
+ * vchnl_pf_send_get_hmc_fcn_resp - Send HMC Function to VF
+ * @dev: IWARP device pointer
+ * @vf_id: Virtual function ID associated with the message
+ * @vchnl_msg: Virtual channel message buffer pointer
+ * @hmc_fcn: HMC function index to return to the VF
+ */
+static void vchnl_pf_send_get_hmc_fcn_resp(struct i40iw_sc_dev *dev,
+                                          u32 vf_id,
+                                          struct i40iw_virtchnl_op_buf *vchnl_msg,
+                                          u16 hmc_fcn)
+{
+       enum i40iw_status_code ret_code;
+       u8 resp_buffer[sizeof(struct i40iw_virtchnl_resp_buf) + sizeof(u16) - 1];
+       struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)resp_buffer;
+
+       memset(resp_buffer, 0, sizeof(resp_buffer));
+       vchnl_msg_resp->iw_chnl_op_ctx = vchnl_msg->iw_chnl_op_ctx;
+       vchnl_msg_resp->iw_chnl_buf_len = sizeof(resp_buffer);
+       vchnl_msg_resp->iw_op_ret_code = I40IW_SUCCESS;
+       *((u16 *)vchnl_msg_resp->iw_chnl_buf) = hmc_fcn;
+       ret_code = dev->vchnl_if.vchnl_send(dev, vf_id, resp_buffer, sizeof(resp_buffer));
+       if (ret_code)
+               i40iw_debug(dev, I40IW_DEBUG_VIRT,
+                           "%s: virt channel send failed 0x%x\n", __func__, ret_code);
+}
+
+/**
+ * vchnl_pf_send_get_pe_stats_resp - Send PE Stats to VF
+ * @dev: IWARP device pointer
+ * @vf_id: Virtual function ID associated with the message
+ * @vchnl_msg: Virtual channel message buffer pointer
+ * @hw_stats: HW Stats struct
+ */
+static void vchnl_pf_send_get_pe_stats_resp(struct i40iw_sc_dev *dev,
+                                           u32 vf_id,
+                                           struct i40iw_virtchnl_op_buf *vchnl_msg,
+                                           struct i40iw_dev_hw_stats hw_stats)
+{
+       enum i40iw_status_code ret_code;
+       u8 resp_buffer[sizeof(struct i40iw_virtchnl_resp_buf) + sizeof(struct i40iw_dev_hw_stats) - 1];
+       struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)resp_buffer;
+
+       memset(resp_buffer, 0, sizeof(resp_buffer));
+       vchnl_msg_resp->iw_chnl_op_ctx = vchnl_msg->iw_chnl_op_ctx;
+       vchnl_msg_resp->iw_chnl_buf_len = sizeof(resp_buffer);
+       vchnl_msg_resp->iw_op_ret_code = I40IW_SUCCESS;
+       *((struct i40iw_dev_hw_stats *)vchnl_msg_resp->iw_chnl_buf) = hw_stats;
+       ret_code = dev->vchnl_if.vchnl_send(dev, vf_id, resp_buffer, sizeof(resp_buffer));
+       if (ret_code)
+               i40iw_debug(dev, I40IW_DEBUG_VIRT,
+                           "%s: virt channel send failed 0x%x\n", __func__, ret_code);
+}
+
+/**
+ * vchnl_pf_send_error_resp - Send an error response to VF
+ * @dev: IWARP device pointer
+ * @vf_id: Virtual function ID associated with the message
+ * @vchnl_msg: Virtual channel message buffer pointer
+ * @op_ret_code: error code to return to the VF
+ */
+static void vchnl_pf_send_error_resp(struct i40iw_sc_dev *dev, u32 vf_id,
+                                    struct i40iw_virtchnl_op_buf *vchnl_msg,
+                                    u16 op_ret_code)
+{
+       enum i40iw_status_code ret_code;
+       u8 resp_buffer[sizeof(struct i40iw_virtchnl_resp_buf)];
+       struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)resp_buffer;
+
+       memset(resp_buffer, 0, sizeof(resp_buffer));
+       vchnl_msg_resp->iw_chnl_op_ctx = vchnl_msg->iw_chnl_op_ctx;
+       vchnl_msg_resp->iw_chnl_buf_len = sizeof(resp_buffer);
+       vchnl_msg_resp->iw_op_ret_code = (u16)op_ret_code;
+       ret_code = dev->vchnl_if.vchnl_send(dev, vf_id, resp_buffer, sizeof(resp_buffer));
+       if (ret_code)
+               i40iw_debug(dev, I40IW_DEBUG_VIRT,
+                           "%s: virt channel send failed 0x%x\n", __func__, ret_code);
+}
+
+/**
+ * pf_cqp_get_hmc_fcn_callback - Callback for Get HMC Fcn
+ * @dev: IWARP device pointer
+ * @callback_param: VF device pointer passed when the CQP request was issued
+ * @cqe_info: CQP completion information
+ */
+static void pf_cqp_get_hmc_fcn_callback(struct i40iw_sc_dev *dev, void *callback_param,
+                                       struct i40iw_ccq_cqe_info *cqe_info)
+{
+       struct i40iw_vfdev *vf_dev = callback_param;
+       struct i40iw_virt_mem vf_dev_mem;
+
+       if (cqe_info->error) {
+               i40iw_debug(dev, I40IW_DEBUG_VIRT,
+                           "CQP Completion Error on Get HMC Function.  Maj = 0x%04x, Minor = 0x%04x\n",
+                           cqe_info->maj_err_code, cqe_info->min_err_code);
+               dev->vf_dev[vf_dev->iw_vf_idx] = NULL;
+               vchnl_pf_send_error_resp(dev, vf_dev->vf_id, &vf_dev->vf_msg_buffer.vchnl_msg,
+                                        (u16)I40IW_ERR_CQP_COMPL_ERROR);
+               vf_dev_mem.va = vf_dev;
+               vf_dev_mem.size = sizeof(*vf_dev);
+               i40iw_free_virt_mem(dev->hw, &vf_dev_mem);
+       } else {
+               i40iw_debug(dev, I40IW_DEBUG_VIRT,
+                           "CQP Completion Operation Return information = 0x%08x\n",
+                           cqe_info->op_ret_val);
+               vf_dev->pmf_index = (u16)cqe_info->op_ret_val;
+               vf_dev->msg_count--;
+               vchnl_pf_send_get_hmc_fcn_resp(dev,
+                                              vf_dev->vf_id,
+                                              &vf_dev->vf_msg_buffer.vchnl_msg,
+                                              vf_dev->pmf_index);
+       }
+}
+
+/**
+ * pf_add_hmc_obj_callback - Callback for Add HMC Object
+ * @work_vf_dev: pointer to the VF Device
+ */
+static void pf_add_hmc_obj_callback(void *work_vf_dev)
+{
+       struct i40iw_vfdev *vf_dev = (struct i40iw_vfdev *)work_vf_dev;
+       struct i40iw_hmc_info *hmc_info = &vf_dev->hmc_info;
+       struct i40iw_virtchnl_op_buf *vchnl_msg = &vf_dev->vf_msg_buffer.vchnl_msg;
+       struct i40iw_hmc_create_obj_info info;
+       struct i40iw_virtchnl_hmc_obj_range *add_hmc_obj;
+       enum i40iw_status_code ret_code;
+
+       if (!vf_dev->pf_hmc_initialized) {
+               ret_code = i40iw_pf_init_vfhmc(vf_dev->pf_dev, (u8)vf_dev->pmf_index, NULL);
+               if (ret_code)
+                       goto add_out;
+               vf_dev->pf_hmc_initialized = true;
+       }
+
+       add_hmc_obj = (struct i40iw_virtchnl_hmc_obj_range *)vchnl_msg->iw_chnl_buf;
+
+       memset(&info, 0, sizeof(info));
+       info.hmc_info = hmc_info;
+       info.is_pf = false;
+       info.rsrc_type = (u32)add_hmc_obj->obj_type;
+       info.entry_type = (info.rsrc_type == I40IW_HMC_IW_PBLE) ? I40IW_SD_TYPE_PAGED : I40IW_SD_TYPE_DIRECT;
+       info.start_idx = add_hmc_obj->start_index;
+       info.count = add_hmc_obj->obj_count;
+       i40iw_debug(vf_dev->pf_dev, I40IW_DEBUG_VIRT,
+                   "I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE.  Add %u type %u objects\n",
+                   info.count, info.rsrc_type);
+       ret_code = i40iw_sc_create_hmc_obj(vf_dev->pf_dev, &info);
+       if (!ret_code)
+               vf_dev->hmc_info.hmc_obj[add_hmc_obj->obj_type].cnt = add_hmc_obj->obj_count;
+add_out:
+       vf_dev->msg_count--;
+       vchnl_pf_send_error_resp(vf_dev->pf_dev, vf_dev->vf_id, vchnl_msg, (u16)ret_code);
+}
+
+/**
+ * pf_del_hmc_obj_callback - Callback for delete HMC Object
+ * @work_vf_dev: pointer to the VF Device
+ */
+static void pf_del_hmc_obj_callback(void *work_vf_dev)
+{
+       struct i40iw_vfdev *vf_dev = (struct i40iw_vfdev *)work_vf_dev;
+       struct i40iw_hmc_info *hmc_info = &vf_dev->hmc_info;
+       struct i40iw_virtchnl_op_buf *vchnl_msg = &vf_dev->vf_msg_buffer.vchnl_msg;
+       struct i40iw_hmc_del_obj_info info;
+       struct i40iw_virtchnl_hmc_obj_range *del_hmc_obj;
+       enum i40iw_status_code ret_code = I40IW_SUCCESS;
+
+       if (!vf_dev->pf_hmc_initialized)
+               goto del_out;
+
+       del_hmc_obj = (struct i40iw_virtchnl_hmc_obj_range *)vchnl_msg->iw_chnl_buf;
+
+       memset(&info, 0, sizeof(info));
+       info.hmc_info = hmc_info;
+       info.is_pf = false;
+       info.rsrc_type = (u32)del_hmc_obj->obj_type;
+       info.start_idx = del_hmc_obj->start_index;
+       info.count = del_hmc_obj->obj_count;
+       i40iw_debug(vf_dev->pf_dev, I40IW_DEBUG_VIRT,
+                   "I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE.  Delete %u type %u objects\n",
+                   info.count, info.rsrc_type);
+       ret_code = i40iw_sc_del_hmc_obj(vf_dev->pf_dev, &info, false);
+del_out:
+       vf_dev->msg_count--;
+       vchnl_pf_send_error_resp(vf_dev->pf_dev, vf_dev->vf_id, vchnl_msg, (u16)ret_code);
+}
+
+/**
+ * i40iw_vchnl_recv_pf - Receive PF virtual channel messages
+ * @dev: IWARP device pointer
+ * @vf_id: Virtual function ID associated with the message
+ * @msg: Virtual channel message buffer pointer
+ * @len: Length of the virtual channels message
+ */
+enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev,
+                                          u32 vf_id,
+                                          u8 *msg,
+                                          u16 len)
+{
+       struct i40iw_virtchnl_op_buf *vchnl_msg = (struct i40iw_virtchnl_op_buf *)msg;
+       struct i40iw_vfdev *vf_dev = NULL;
+       struct i40iw_hmc_fcn_info hmc_fcn_info;
+       u16 iw_vf_idx;
+       u16 first_avail_iw_vf = I40IW_MAX_PE_ENABLED_VF_COUNT;
+       struct i40iw_virt_mem vf_dev_mem;
+       struct i40iw_virtchnl_work_info work_info;
+       struct i40iw_dev_pestat *devstat;
+       enum i40iw_status_code ret_code;
+       unsigned long flags;
+
+       if (!dev || !msg || !len)
+               return I40IW_ERR_PARAM;
+
+       if (!dev->vchnl_up)
+               return I40IW_ERR_NOT_READY;
+       if (vchnl_msg->iw_op_code == I40IW_VCHNL_OP_GET_VER) {
+               if (vchnl_msg->iw_op_ver != I40IW_VCHNL_OP_GET_VER_V0)
+                       vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg);
+               else
+                       vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg);
+               return I40IW_SUCCESS;
+       }
+       for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT;
+            iw_vf_idx++) {
+               if (!dev->vf_dev[iw_vf_idx]) {
+                       if (first_avail_iw_vf ==
+                           I40IW_MAX_PE_ENABLED_VF_COUNT)
+                               first_avail_iw_vf = iw_vf_idx;
+                       continue;
+               }
+               if (dev->vf_dev[iw_vf_idx]->vf_id == vf_id) {
+                       vf_dev = dev->vf_dev[iw_vf_idx];
+                       break;
+               }
+       }
+       if (vf_dev) {
+               if (!vf_dev->msg_count) {
+                       vf_dev->msg_count++;
+               } else {
+                       i40iw_debug(dev, I40IW_DEBUG_VIRT,
+                                   "VF%u already has a channel message in progress.\n",
+                                   vf_id);
+                       return I40IW_SUCCESS;
+               }
+       }
+       switch (vchnl_msg->iw_op_code) {
+       case I40IW_VCHNL_OP_GET_HMC_FCN:
+               if (!vf_dev &&
+                   (first_avail_iw_vf != I40IW_MAX_PE_ENABLED_VF_COUNT)) {
+                       ret_code = i40iw_allocate_virt_mem(dev->hw, &vf_dev_mem, sizeof(struct i40iw_vfdev) +
+                                                          (sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX));
+                       if (!ret_code) {
+                               vf_dev = vf_dev_mem.va;
+                               vf_dev->stats_initialized = false;
+                               vf_dev->pf_dev = dev;
+                               vf_dev->msg_count = 1;
+                               vf_dev->vf_id = vf_id;
+                               vf_dev->iw_vf_idx = first_avail_iw_vf;
+                               vf_dev->pf_hmc_initialized = false;
+                               vf_dev->hmc_info.hmc_obj = (struct i40iw_hmc_obj_info *)(&vf_dev[1]);
+                               i40iw_debug(dev, I40IW_DEBUG_VIRT,
+                                           "vf_dev %p, hmc_info %p, hmc_obj %p\n",
+                                           vf_dev, &vf_dev->hmc_info, vf_dev->hmc_info.hmc_obj);
+                               dev->vf_dev[first_avail_iw_vf] = vf_dev;
+                               iw_vf_idx = first_avail_iw_vf;
+                       } else {
+                               i40iw_debug(dev, I40IW_DEBUG_VIRT,
+                                           "VF%u Unable to allocate a VF device structure.\n",
+                                           vf_id);
+                               vchnl_pf_send_error_resp(dev, vf_id, vchnl_msg, (u16)I40IW_ERR_NO_MEMORY);
+                               return I40IW_SUCCESS;
+                       }
+                       memcpy(&vf_dev->vf_msg_buffer.vchnl_msg, vchnl_msg, len);
+                       hmc_fcn_info.callback_fcn = pf_cqp_get_hmc_fcn_callback;
+                       hmc_fcn_info.vf_id = vf_id;
+                       hmc_fcn_info.iw_vf_idx = vf_dev->iw_vf_idx;
+                       hmc_fcn_info.cqp_callback_param = vf_dev;
+                       hmc_fcn_info.free_fcn = false;
+                       ret_code = i40iw_cqp_manage_hmc_fcn_cmd(dev, &hmc_fcn_info);
+                       if (ret_code)
+                               i40iw_debug(dev, I40IW_DEBUG_VIRT,
+                                           "VF%u error CQP HMC Function operation.\n",
+                                           vf_id);
+                       ret_code = i40iw_device_init_pestat(&vf_dev->dev_pestat);
+                       if (ret_code)
+                               i40iw_debug(dev, I40IW_DEBUG_VIRT,
+                                           "VF%u - i40iw_device_init_pestat failed\n",
+                                           vf_id);
+                       vf_dev->dev_pestat.ops.iw_hw_stat_init(&vf_dev->dev_pestat,
+                                                             (u8)vf_dev->pmf_index,
+                                                             dev->hw, false);
+                       vf_dev->stats_initialized = true;
+               } else {
+                       if (vf_dev) {
+                               vf_dev->msg_count--;
+                               vchnl_pf_send_get_hmc_fcn_resp(dev, vf_id, vchnl_msg, vf_dev->pmf_index);
+                       } else {
+                               vchnl_pf_send_error_resp(dev, vf_id, vchnl_msg,
+                                                        (u16)I40IW_ERR_NO_MEMORY);
+                       }
+               }
+               break;
+       case I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE:
+               if (!vf_dev)
+                       return I40IW_ERR_BAD_PTR;
+               work_info.worker_vf_dev = vf_dev;
+               work_info.callback_fcn = pf_add_hmc_obj_callback;
+               memcpy(&vf_dev->vf_msg_buffer.vchnl_msg, vchnl_msg, len);
+               i40iw_cqp_spawn_worker(dev, &work_info, vf_dev->iw_vf_idx);
+               break;
+       case I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE:
+               if (!vf_dev)
+                       return I40IW_ERR_BAD_PTR;
+               work_info.worker_vf_dev = vf_dev;
+               work_info.callback_fcn = pf_del_hmc_obj_callback;
+               memcpy(&vf_dev->vf_msg_buffer.vchnl_msg, vchnl_msg, len);
+               i40iw_cqp_spawn_worker(dev, &work_info, vf_dev->iw_vf_idx);
+               break;
+       case I40IW_VCHNL_OP_GET_STATS:
+               if (!vf_dev)
+                       return I40IW_ERR_BAD_PTR;
+               devstat = &vf_dev->dev_pestat;
+               spin_lock_irqsave(&dev->dev_pestat.stats_lock, flags);
+               devstat->ops.iw_hw_stat_read_all(devstat, &devstat->hw_stats);
+               spin_unlock_irqrestore(&dev->dev_pestat.stats_lock, flags);
+               vf_dev->msg_count--;
+               vchnl_pf_send_get_pe_stats_resp(dev, vf_id, vchnl_msg, devstat->hw_stats);
+               break;
+       default:
+               i40iw_debug(dev, I40IW_DEBUG_VIRT,
+                           "40iw_vchnl_recv_pf: Invalid OpCode 0x%x\n",
+                           vchnl_msg->iw_op_code);
+               vchnl_pf_send_error_resp(dev, vf_id,
+                                        vchnl_msg, (u16)I40IW_ERR_NOT_IMPLEMENTED);
+       }
+       return I40IW_SUCCESS;
+}
+
+/**
+ * i40iw_vchnl_recv_vf - Receive VF virtual channel messages
+ * @dev: IWARP device pointer
+ * @vf_id: Virtual function ID associated with the message
+ * @msg: Virtual channel message buffer pointer
+ * @len: Length of the virtual channels message
+ */
+enum i40iw_status_code i40iw_vchnl_recv_vf(struct i40iw_sc_dev *dev,
+                                          u32 vf_id,
+                                          u8 *msg,
+                                          u16 len)
+{
+       struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)msg;
+       struct i40iw_virtchnl_req *vchnl_req;
+
+       vchnl_req = (struct i40iw_virtchnl_req *)(uintptr_t)vchnl_msg_resp->iw_chnl_op_ctx;
+       vchnl_req->ret_code = (enum i40iw_status_code)vchnl_msg_resp->iw_op_ret_code;
+       if (len == (sizeof(*vchnl_msg_resp) + vchnl_req->parm_len - 1)) {
+               if (vchnl_req->parm_len && vchnl_req->parm)
+                       memcpy(vchnl_req->parm, vchnl_msg_resp->iw_chnl_buf, vchnl_req->parm_len);
+               i40iw_debug(dev, I40IW_DEBUG_VIRT,
+                           "%s: Got response, data size %u\n", __func__,
+                           vchnl_req->parm_len);
+       } else {
+               i40iw_debug(dev, I40IW_DEBUG_VIRT,
+                           "%s: error length on response, Got %u, expected %u\n", __func__,
+                           len, (u32)(sizeof(*vchnl_msg_resp) + vchnl_req->parm_len - 1));
+       }
+
+       return I40IW_SUCCESS;
+}
+
+/**
+ * i40iw_vchnl_vf_get_ver - Request Channel version
+ * @dev: IWARP device pointer
+ * @vchnl_ver: Virtual channel message version pointer
+ */
+enum i40iw_status_code i40iw_vchnl_vf_get_ver(struct i40iw_sc_dev *dev,
+                                             u32 *vchnl_ver)
+{
+       struct i40iw_virtchnl_req vchnl_req;
+       enum i40iw_status_code ret_code;
+
+       memset(&vchnl_req, 0, sizeof(vchnl_req));
+       vchnl_req.dev = dev;
+       vchnl_req.parm = vchnl_ver;
+       vchnl_req.parm_len = sizeof(*vchnl_ver);
+       vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;
+       ret_code = vchnl_vf_send_get_ver_req(dev, &vchnl_req);
+       if (!ret_code) {
+               ret_code = i40iw_vf_wait_vchnl_resp(dev);
+               if (!ret_code)
+                       ret_code = vchnl_req.ret_code;
+               else
+                       dev->vchnl_up = false;
+       } else {
+               i40iw_debug(dev, I40IW_DEBUG_VIRT,
+                           "%s Send message failed 0x%0x\n", __func__, ret_code);
+       }
+       return ret_code;
+}
+
+/**
+ * i40iw_vchnl_vf_get_hmc_fcn - Request HMC Function
+ * @dev: IWARP device pointer
+ * @hmc_fcn: HMC function index pointer
+ */
+enum i40iw_status_code i40iw_vchnl_vf_get_hmc_fcn(struct i40iw_sc_dev *dev,
+                                                 u16 *hmc_fcn)
+{
+       struct i40iw_virtchnl_req vchnl_req;
+       enum i40iw_status_code ret_code;
+
+       memset(&vchnl_req, 0, sizeof(vchnl_req));
+       vchnl_req.dev = dev;
+       vchnl_req.parm = hmc_fcn;
+       vchnl_req.parm_len = sizeof(*hmc_fcn);
+       vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;
+       ret_code = vchnl_vf_send_get_hmc_fcn_req(dev, &vchnl_req);
+       if (!ret_code) {
+               ret_code = i40iw_vf_wait_vchnl_resp(dev);
+               if (!ret_code)
+                       ret_code = vchnl_req.ret_code;
+               else
+                       dev->vchnl_up = false;
+       } else {
+               i40iw_debug(dev, I40IW_DEBUG_VIRT,
+                           "%s Send message failed 0x%0x\n", __func__, ret_code);
+       }
+       return ret_code;
+}
+
+/**
+ * i40iw_vchnl_vf_add_hmc_objs - Add HMC Object
+ * @dev: IWARP device pointer
+ * @rsrc_type: HMC Resource type
+ * @start_index: Starting index of the objects to be added
+ * @rsrc_count: Number of resources to be added
+ */
+enum i40iw_status_code i40iw_vchnl_vf_add_hmc_objs(struct i40iw_sc_dev *dev,
+                                                  enum i40iw_hmc_rsrc_type rsrc_type,
+                                                  u32 start_index,
+                                                  u32 rsrc_count)
+{
+       struct i40iw_virtchnl_req vchnl_req;
+       enum i40iw_status_code ret_code;
+
+       memset(&vchnl_req, 0, sizeof(vchnl_req));
+       vchnl_req.dev = dev;
+       vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;
+       ret_code = vchnl_vf_send_add_hmc_objs_req(dev,
+                                                 &vchnl_req,
+                                                 rsrc_type,
+                                                 start_index,
+                                                 rsrc_count);
+       if (!ret_code) {
+               ret_code = i40iw_vf_wait_vchnl_resp(dev);
+               if (!ret_code)
+                       ret_code = vchnl_req.ret_code;
+               else
+                       dev->vchnl_up = false;
+       } else {
+               i40iw_debug(dev, I40IW_DEBUG_VIRT,
+                           "%s Send message failed 0x%0x\n", __func__, ret_code);
+       }
+       return ret_code;
+}
+
+/**
+ * i40iw_vchnl_vf_del_hmc_obj - del HMC obj
+ * @dev: IWARP device pointer
+ * @rsrc_type: HMC Resource type
+ * @start_index: Starting index of the object to delete
+ * @rsrc_count: Number of resources to be delete
+ */
+enum i40iw_status_code i40iw_vchnl_vf_del_hmc_obj(struct i40iw_sc_dev *dev,
+                                                 enum i40iw_hmc_rsrc_type rsrc_type,
+                                                 u32 start_index,
+                                                 u32 rsrc_count)
+{
+       struct i40iw_virtchnl_req vchnl_req;
+       enum i40iw_status_code ret_code;
+
+       memset(&vchnl_req, 0, sizeof(vchnl_req));
+       vchnl_req.dev = dev;
+       vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;
+       ret_code = vchnl_vf_send_del_hmc_objs_req(dev,
+                                                 &vchnl_req,
+                                                 rsrc_type,
+                                                 start_index,
+                                                 rsrc_count);
+       if (!ret_code) {
+               ret_code = i40iw_vf_wait_vchnl_resp(dev);
+               if (!ret_code)
+                       ret_code = vchnl_req.ret_code;
+               else
+                       dev->vchnl_up = false;
+       } else {
+               i40iw_debug(dev, I40IW_DEBUG_VIRT,
+                           "%s Send message failed 0x%0x\n", __func__, ret_code);
+       }
+       return ret_code;
+}
+
+/**
+ * i40iw_vchnl_vf_get_pe_stats - Get PE stats
+ * @dev: IWARP device pointer
+ * @hw_stats: HW stats struct
+ */
+enum i40iw_status_code i40iw_vchnl_vf_get_pe_stats(struct i40iw_sc_dev *dev,
+                                                  struct i40iw_dev_hw_stats *hw_stats)
+{
+       struct i40iw_virtchnl_req  vchnl_req;
+       enum i40iw_status_code ret_code;
+
+       memset(&vchnl_req, 0, sizeof(vchnl_req));
+       vchnl_req.dev = dev;
+       vchnl_req.parm = hw_stats;
+       vchnl_req.parm_len = sizeof(*hw_stats);
+       vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg;
+       ret_code = vchnl_vf_send_get_pe_stats_req(dev, &vchnl_req);
+       if (!ret_code) {
+               ret_code = i40iw_vf_wait_vchnl_resp(dev);
+               if (!ret_code)
+                       ret_code = vchnl_req.ret_code;
+               else
+                       dev->vchnl_up = false;
+       } else {
+               i40iw_debug(dev, I40IW_DEBUG_VIRT,
+                           "%s Send message failed 0x%0x\n", __func__, ret_code);
+       }
+       return ret_code;
+}
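
Taken together, the i40iw_vchnl_vf_* helpers above implement a simple request/wait/response pattern over the virtual channel. A hedged sketch of how a VF bring-up path might use them (example_vf_handshake is hypothetical; the actual ordering is driven by the device init code, which is not part of this file):

static enum i40iw_status_code example_vf_handshake(struct i40iw_sc_dev *dev,
                                                   u16 *hmc_fcn)
{
        enum i40iw_status_code ret;
        u32 vchnl_ver = 0;

        /* 1. Agree on a channel version with the PF. */
        ret = i40iw_vchnl_vf_get_ver(dev, &vchnl_ver);
        if (ret)
                return ret;

        /* 2. Ask the PF which HMC function this VF should use. */
        return i40iw_vchnl_vf_get_hmc_fcn(dev, hmc_fcn);
}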
diff --git a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.h b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.h
new file mode 100644 (file)
index 0000000..24886ef
--- /dev/null
@@ -0,0 +1,124 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses.  You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+*   Redistribution and use in source and binary forms, with or
+*   without modification, are permitted provided that the following
+*   conditions are met:
+*
+*    - Redistributions of source code must retain the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer.
+*
+*    - Redistributions in binary form must reproduce the above
+*      copyright notice, this list of conditions and the following
+*      disclaimer in the documentation and/or other materials
+*      provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_VIRTCHNL_H
+#define I40IW_VIRTCHNL_H
+
+#include "i40iw_hmc.h"
+
+#pragma pack(push, 1)
+
+struct i40iw_virtchnl_op_buf {
+       u16 iw_op_code;
+       u16 iw_op_ver;
+       u16 iw_chnl_buf_len;
+       u16 rsvd;
+       u64 iw_chnl_op_ctx;
+       /* Member alignment MUST be maintained above this location */
+       u8 iw_chnl_buf[1];
+};
+
+struct i40iw_virtchnl_resp_buf {
+       u64 iw_chnl_op_ctx;
+       u16 iw_chnl_buf_len;
+       s16 iw_op_ret_code;
+       /* Member alignment MUST be maintained above this location */
+       u16 rsvd[2];
+       u8 iw_chnl_buf[1];
+};
+
+enum i40iw_virtchnl_ops {
+       I40IW_VCHNL_OP_GET_VER = 0,
+       I40IW_VCHNL_OP_GET_HMC_FCN,
+       I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE,
+       I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE,
+       I40IW_VCHNL_OP_GET_STATS
+};
+
+#define I40IW_VCHNL_OP_GET_VER_V0 0
+#define I40IW_VCHNL_OP_GET_HMC_FCN_V0 0
+#define I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE_V0 0
+#define I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE_V0 0
+#define I40IW_VCHNL_OP_GET_STATS_V0 0
+#define I40IW_VCHNL_CHNL_VER_V0 0
+
+struct i40iw_dev_hw_stats;
+
+struct i40iw_virtchnl_hmc_obj_range {
+       u16 obj_type;
+       u16 rsvd;
+       u32 start_index;
+       u32 obj_count;
+};
+
+enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev,
+                                          u32 vf_id,
+                                          u8 *msg,
+                                          u16 len);
+
+enum i40iw_status_code i40iw_vchnl_recv_vf(struct i40iw_sc_dev *dev,
+                                          u32 vf_id,
+                                          u8 *msg,
+                                          u16 len);
+
+struct i40iw_virtchnl_req {
+       struct i40iw_sc_dev *dev;
+       struct i40iw_virtchnl_op_buf *vchnl_msg;
+       void *parm;
+       u32 vf_id;
+       u16 parm_len;
+       s16 ret_code;
+};
+
+#pragma pack(pop)
+
+enum i40iw_status_code i40iw_vchnl_vf_get_ver(struct i40iw_sc_dev *dev,
+                                             u32 *vchnl_ver);
+
+enum i40iw_status_code i40iw_vchnl_vf_get_hmc_fcn(struct i40iw_sc_dev *dev,
+                                                 u16 *hmc_fcn);
+
+enum i40iw_status_code i40iw_vchnl_vf_add_hmc_objs(struct i40iw_sc_dev *dev,
+                                                  enum i40iw_hmc_rsrc_type rsrc_type,
+                                                  u32 start_index,
+                                                  u32 rsrc_count);
+
+enum i40iw_status_code i40iw_vchnl_vf_del_hmc_obj(struct i40iw_sc_dev *dev,
+                                                 enum i40iw_hmc_rsrc_type rsrc_type,
+                                                 u32 start_index,
+                                                 u32 rsrc_count);
+
+enum i40iw_status_code i40iw_vchnl_vf_get_pe_stats(struct i40iw_sc_dev *dev,
+                                                  struct i40iw_dev_hw_stats *hw_stats);
+#endif
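
The packed message layouts above carry a variable-length payload in the one-byte flexible tail iw_chnl_buf[1], which is why the senders in i40iw_virtchnl.c compute buffer lengths as sizeof(header) + payload - 1. A small sketch of that convention (assumption: the payload is appended directly after the fixed header):

static u16 example_op_buf_len(u16 payload_len)
{
        /* '- 1' compensates for iw_chnl_buf[1] already counted by sizeof() */
        return sizeof(struct i40iw_virtchnl_op_buf) + payload_len - 1;
}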
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index 21cb41a60fe8096c3392e9b886252ae37307d9a4..c74ef2620b859b800ca673e6b3246fa68c831f4f 100644 (file)
@@ -310,7 +310,7 @@ static void aliasguid_query_handler(int status,
        if (status) {
                pr_debug("(port: %d) failed: status = %d\n",
                         cb_ctx->port, status);
-               rec->time_to_run = ktime_get_real_ns() + 1 * NSEC_PER_SEC;
+               rec->time_to_run = ktime_get_boot_ns() + 1 * NSEC_PER_SEC;
                goto out;
        }
 
@@ -416,7 +416,7 @@ next_entry:
                         be64_to_cpu((__force __be64)rec->guid_indexes),
                         be64_to_cpu((__force __be64)applied_guid_indexes),
                         be64_to_cpu((__force __be64)declined_guid_indexes));
-               rec->time_to_run = ktime_get_real_ns() +
+               rec->time_to_run = ktime_get_boot_ns() +
                        resched_delay_sec * NSEC_PER_SEC;
        } else {
                rec->status = MLX4_GUID_INFO_STATUS_SET;
@@ -708,7 +708,7 @@ static int get_low_record_time_index(struct mlx4_ib_dev *dev, u8 port,
                }
        }
        if (resched_delay_sec) {
-               u64 curr_time = ktime_get_real_ns();
+               u64 curr_time = ktime_get_boot_ns();
 
                *resched_delay_sec = (low_record_time < curr_time) ? 0 :
                        div_u64((low_record_time - curr_time), NSEC_PER_SEC);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 1c7ab6cabbb86989fba8952b8f5e6be645e313dc..914bc98e753f5da03f4a216e8d1cea6b311ddd3a 100644 (file)
@@ -1643,6 +1643,56 @@ static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_
        return err;
 }
 
+static int mlx4_ib_add_dont_trap_rule(struct mlx4_dev *dev,
+                                     struct ib_flow_attr *flow_attr,
+                                     enum mlx4_net_trans_promisc_mode *type)
+{
+       int err = 0;
+
+       if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER) ||
+           (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) ||
+           (flow_attr->num_of_specs > 1) || (flow_attr->priority != 0)) {
+               return -EOPNOTSUPP;
+       }
+
+       if (flow_attr->num_of_specs == 0) {
+               type[0] = MLX4_FS_MC_SNIFFER;
+               type[1] = MLX4_FS_UC_SNIFFER;
+       } else {
+               union ib_flow_spec *ib_spec;
+
+               ib_spec = (union ib_flow_spec *)(flow_attr + 1);
+               if (ib_spec->type !=  IB_FLOW_SPEC_ETH)
+                       return -EINVAL;
+
+               /* if all is zero then MC and UC */
+               if (is_zero_ether_addr(ib_spec->eth.mask.dst_mac)) {
+                       type[0] = MLX4_FS_MC_SNIFFER;
+                       type[1] = MLX4_FS_UC_SNIFFER;
+               } else {
+                       u8 mac[ETH_ALEN] = {ib_spec->eth.mask.dst_mac[0] ^ 0x01,
+                                           ib_spec->eth.mask.dst_mac[1],
+                                           ib_spec->eth.mask.dst_mac[2],
+                                           ib_spec->eth.mask.dst_mac[3],
+                                           ib_spec->eth.mask.dst_mac[4],
+                                           ib_spec->eth.mask.dst_mac[5]};
+
+                       /* The xor above touched only the MC bit; a non-empty
+                        * mask is valid only if that bit is set and the rest
+                        * are zero.
+                        */
+                       if (!is_zero_ether_addr(&mac[0]))
+                               return -EINVAL;
+
+                       if (is_multicast_ether_addr(ib_spec->eth.val.dst_mac))
+                               type[0] = MLX4_FS_MC_SNIFFER;
+                       else
+                               type[0] = MLX4_FS_UC_SNIFFER;
+               }
+       }
+
+       return err;
+}
+
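
The mask check above relies on flipping only the multicast/group bit of the destination-MAC mask and requiring the result to be all-zero, i.e. the mask may contain nothing but that bit. A standalone illustration (example_dmac_mask_is_mc_bit_only is a hypothetical helper, not part of the patch):

static bool example_dmac_mask_is_mc_bit_only(const u8 *mask)
{
        u8 tmp[ETH_ALEN];

        ether_addr_copy(tmp, mask);
        tmp[0] ^= 0x01;         /* flip the multicast/group bit */
        return is_zero_ether_addr(tmp);
}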
 static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
                                    struct ib_flow_attr *flow_attr,
                                    int domain)
@@ -1653,6 +1703,10 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
        struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
        int is_bonded = mlx4_is_bonded(dev);
 
+       if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
+           (flow_attr->type != IB_FLOW_ATTR_NORMAL))
+               return ERR_PTR(-EOPNOTSUPP);
+
        memset(type, 0, sizeof(type));
 
        mflow = kzalloc(sizeof(*mflow), GFP_KERNEL);
@@ -1663,7 +1717,19 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
 
        switch (flow_attr->type) {
        case IB_FLOW_ATTR_NORMAL:
-               type[0] = MLX4_FS_REGULAR;
+               /* If the dont-trap flag (continue match) is set, under
+                * specific conditions traffic is replicated to the given qp
+                * without stealing it.
+                */
+               if (unlikely(flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)) {
+                       err = mlx4_ib_add_dont_trap_rule(dev,
+                                                        flow_attr,
+                                                        type);
+                       if (err)
+                               goto err_free;
+               } else {
+                       type[0] = MLX4_FS_REGULAR;
+               }
                break;
 
        case IB_FLOW_ATTR_ALL_DEFAULT:
@@ -1675,8 +1741,8 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
                break;
 
        case IB_FLOW_ATTR_SNIFFER:
-               type[0] = MLX4_FS_UC_SNIFFER;
-               type[1] = MLX4_FS_MC_SNIFFER;
+               type[0] = MLX4_FS_MIRROR_RX_PORT;
+               type[1] = MLX4_FS_MIRROR_SX_PORT;
                break;
 
        default:
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 52ce7b000044f6e0814b59b08e1ea27a696e77d8..1eca01cebe51f6cc044a8e3430d25bfe114f13e0 100644 (file)
@@ -711,7 +711,8 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                  u64 virt_addr, int access_flags,
                                  struct ib_udata *udata);
 int mlx4_ib_dereg_mr(struct ib_mr *mr);
-struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
+struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
+                              struct ib_udata *udata);
 int mlx4_ib_dealloc_mw(struct ib_mw *mw);
 struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
                               enum ib_mr_type mr_type,
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index 242b94ec105babe092ba3fee42c931ee24518adc..ce0b5aa8eb9b3a5695f9a9ae02d3f153f7dcc145 100644 (file)
@@ -32,6 +32,7 @@
  */
 
 #include <linux/slab.h>
+#include <rdma/ib_user_verbs.h>
 
 #include "mlx4_ib.h"
 
@@ -334,7 +335,8 @@ int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
        return 0;
 }
 
-struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
+struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
+                              struct ib_udata *udata)
 {
        struct mlx4_ib_dev *dev = to_mdev(pd->device);
        struct mlx4_ib_mw *mw;
diff --git a/drivers/infiniband/hw/mlx5/Makefile b/drivers/infiniband/hw/mlx5/Makefile
index 27a70159e2ea8813ad7cbffc4913b3561897af34..7493a83acd28dcc73c27395d53203a19f88565ce 100644 (file)
@@ -1,4 +1,4 @@
 obj-$(CONFIG_MLX5_INFINIBAND)  += mlx5_ib.o
 
-mlx5_ib-y :=   main.o cq.o doorbell.o qp.o mem.o srq.o mr.o ah.o mad.o
+mlx5_ib-y :=   main.o cq.o doorbell.o qp.o mem.o srq.o mr.o ah.o mad.o gsi.o ib_virt.o
 mlx5_ib-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += odp.o
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index fd1de31e0611cbbd3d8691e595677d8e182f3198..a00ba4418de9ba79875c5b17ce9192c3fff6ba9f 100644 (file)
@@ -207,7 +207,10 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
                break;
        case MLX5_CQE_RESP_SEND:
                wc->opcode   = IB_WC_RECV;
-               wc->wc_flags = 0;
+               wc->wc_flags = IB_WC_IP_CSUM_OK;
+               if (unlikely(!((cqe->hds_ip_ext & CQE_L3_OK) &&
+                              (cqe->hds_ip_ext & CQE_L4_OK))))
+                       wc->wc_flags = 0;
                break;
        case MLX5_CQE_RESP_SEND_IMM:
                wc->opcode      = IB_WC_RECV;
@@ -431,7 +434,7 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
        struct mlx5_core_qp *mqp;
        struct mlx5_ib_wq *wq;
        struct mlx5_sig_err_cqe *sig_err_cqe;
-       struct mlx5_core_mr *mmr;
+       struct mlx5_core_mkey *mmkey;
        struct mlx5_ib_mr *mr;
        uint8_t opcode;
        uint32_t qpn;
@@ -536,17 +539,17 @@ repoll:
        case MLX5_CQE_SIG_ERR:
                sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;
 
-               read_lock(&dev->mdev->priv.mr_table.lock);
-               mmr = __mlx5_mr_lookup(dev->mdev,
-                                      mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
-               if (unlikely(!mmr)) {
-                       read_unlock(&dev->mdev->priv.mr_table.lock);
+               read_lock(&dev->mdev->priv.mkey_table.lock);
+               mmkey = __mlx5_mr_lookup(dev->mdev,
+                                        mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
+               if (unlikely(!mmkey)) {
+                       read_unlock(&dev->mdev->priv.mkey_table.lock);
                        mlx5_ib_warn(dev, "CQE@CQ %06x for unknown MR %6x\n",
                                     cq->mcq.cqn, be32_to_cpu(sig_err_cqe->mkey));
                        return -EINVAL;
                }
 
-               mr = to_mibmr(mmr);
+               mr = to_mibmr(mmkey);
                get_sig_err_item(sig_err_cqe, &mr->sig->err_item);
                mr->sig->sig_err_exists = true;
                mr->sig->sigerr_count++;
@@ -558,25 +561,51 @@ repoll:
                             mr->sig->err_item.expected,
                             mr->sig->err_item.actual);
 
-               read_unlock(&dev->mdev->priv.mr_table.lock);
+               read_unlock(&dev->mdev->priv.mkey_table.lock);
                goto repoll;
        }
 
        return 0;
 }
 
+static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
+                       struct ib_wc *wc)
+{
+       struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
+       struct mlx5_ib_wc *soft_wc, *next;
+       int npolled = 0;
+
+       list_for_each_entry_safe(soft_wc, next, &cq->wc_list, list) {
+               if (npolled >= num_entries)
+                       break;
+
+               mlx5_ib_dbg(dev, "polled software generated completion on CQ 0x%x\n",
+                           cq->mcq.cqn);
+
+               wc[npolled++] = soft_wc->wc;
+               list_del(&soft_wc->list);
+               kfree(soft_wc);
+       }
+
+       return npolled;
+}
+
 int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 {
        struct mlx5_ib_cq *cq = to_mcq(ibcq);
        struct mlx5_ib_qp *cur_qp = NULL;
        unsigned long flags;
+       int soft_polled = 0;
        int npolled;
        int err = 0;
 
        spin_lock_irqsave(&cq->lock, flags);
 
-       for (npolled = 0; npolled < num_entries; npolled++) {
-               err = mlx5_poll_one(cq, &cur_qp, wc + npolled);
+       if (unlikely(!list_empty(&cq->wc_list)))
+               soft_polled = poll_soft_wc(cq, num_entries, wc);
+
+       for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
+               err = mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled);
                if (err)
                        break;
        }
@@ -587,7 +616,7 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
        spin_unlock_irqrestore(&cq->lock, flags);
 
        if (err == 0 || err == -EAGAIN)
-               return npolled;
+               return soft_polled + npolled;
        else
                return err;
 }
@@ -595,16 +624,27 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
 int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
 {
        struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev;
+       struct mlx5_ib_cq *cq = to_mcq(ibcq);
        void __iomem *uar_page = mdev->priv.uuari.uars[0].map;
+       unsigned long irq_flags;
+       int ret = 0;
+
+       spin_lock_irqsave(&cq->lock, irq_flags);
+       if (cq->notify_flags != IB_CQ_NEXT_COMP)
+               cq->notify_flags = flags & IB_CQ_SOLICITED_MASK;
 
-       mlx5_cq_arm(&to_mcq(ibcq)->mcq,
+       if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !list_empty(&cq->wc_list))
+               ret = 1;
+       spin_unlock_irqrestore(&cq->lock, irq_flags);
+
+       mlx5_cq_arm(&cq->mcq,
                    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
                    MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
                    uar_page,
                    MLX5_GET_DOORBELL_LOCK(&mdev->priv.cq_uar_lock),
                    to_mcq(ibcq)->mcq.cons_index);
 
-       return 0;
+       return ret;
 }
 
 static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
@@ -757,6 +797,14 @@ static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
        mlx5_db_free(dev->mdev, &cq->db);
 }
 
+static void notify_soft_wc_handler(struct work_struct *work)
+{
+       struct mlx5_ib_cq *cq = container_of(work, struct mlx5_ib_cq,
+                                            notify_work);
+
+       cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+}
+
 struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
                                const struct ib_cq_init_attr *attr,
                                struct ib_ucontext *context,
@@ -807,6 +855,8 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
                                       &index, &inlen);
                if (err)
                        goto err_create;
+
+               INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
        }
 
        cq->cqe_size = cqe_size;
@@ -832,6 +882,8 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
        cq->mcq.comp  = mlx5_ib_cq_comp;
        cq->mcq.event = mlx5_ib_cq_event;
 
+       INIT_LIST_HEAD(&cq->wc_list);
+
        if (context)
                if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
                        err = -EFAULT;
@@ -1219,3 +1271,27 @@ int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq)
        cq = to_mcq(ibcq);
        return cq->cqe_size;
 }
+
+/* Called from atomic context */
+int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc)
+{
+       struct mlx5_ib_wc *soft_wc;
+       struct mlx5_ib_cq *cq = to_mcq(ibcq);
+       unsigned long flags;
+
+       soft_wc = kmalloc(sizeof(*soft_wc), GFP_ATOMIC);
+       if (!soft_wc)
+               return -ENOMEM;
+
+       soft_wc->wc = *wc;
+       spin_lock_irqsave(&cq->lock, flags);
+       list_add_tail(&soft_wc->list, &cq->wc_list);
+       if (cq->notify_flags == IB_CQ_NEXT_COMP ||
+           wc->status != IB_WC_SUCCESS) {
+               cq->notify_flags = 0;
+               schedule_work(&cq->notify_work);
+       }
+       spin_unlock_irqrestore(&cq->lock, flags);
+
+       return 0;
+}
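
The cq.c hunks above add a software-completion path: mlx5_ib_generate_wc() queues a driver-generated work completion on cq->wc_list and schedules notify_work when the CQ is armed, mlx5_ib_poll_cq() drains that list before polling hardware CQEs, and mlx5_ib_arm_cq() now returns 1 when IB_CQ_REPORT_MISSED_EVENTS is requested while soft completions are pending. A minimal sketch of a driver-internal caller, assuming a hypothetical helper name and wr_id; only mlx5_ib_generate_wc() and the ib_wc layout come from the patch:

static void example_inject_send_completion(struct ib_cq *ibcq, u64 wr_id)
{
	struct ib_wc wc = {
		{ .wr_id = wr_id },
		.status = IB_WC_SUCCESS,
		.opcode = IB_WC_SEND,
	};

	/* Queues wc on cq->wc_list; if the CQ is armed (or the status is an
	 * error), notify_work invokes the completion handler.  The GSI code
	 * below also fills in wc.qp before calling this.
	 */
	WARN_ON_ONCE(mlx5_ib_generate_wc(ibcq, &wc));
}
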
diff --git a/drivers/infiniband/hw/mlx5/gsi.c b/drivers/infiniband/hw/mlx5/gsi.c
new file mode 100644 (file)
index 0000000..53e03c8
--- /dev/null
@@ -0,0 +1,548 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "mlx5_ib.h"
+
+struct mlx5_ib_gsi_wr {
+       struct ib_cqe cqe;
+       struct ib_wc wc;
+       int send_flags;
+       bool completed:1;
+};
+
+struct mlx5_ib_gsi_qp {
+       struct ib_qp ibqp;
+       struct ib_qp *rx_qp;
+       u8 port_num;
+       struct ib_qp_cap cap;
+       enum ib_sig_type sq_sig_type;
+       /* Serialize qp state modifications */
+       struct mutex mutex;
+       struct ib_cq *cq;
+       struct mlx5_ib_gsi_wr *outstanding_wrs;
+       u32 outstanding_pi, outstanding_ci;
+       int num_qps;
+       /* Protects access to the tx_qps. Post send operations synchronize
+        * with tx_qp creation in setup_qp(). Also protects the
+        * outstanding_wrs array and indices.
+        */
+       spinlock_t lock;
+       struct ib_qp **tx_qps;
+};
+
+static struct mlx5_ib_gsi_qp *gsi_qp(struct ib_qp *qp)
+{
+       return container_of(qp, struct mlx5_ib_gsi_qp, ibqp);
+}
+
+static bool mlx5_ib_deth_sqpn_cap(struct mlx5_ib_dev *dev)
+{
+       return MLX5_CAP_GEN(dev->mdev, set_deth_sqpn);
+}
+
+static u32 next_outstanding(struct mlx5_ib_gsi_qp *gsi, u32 index)
+{
+       return ++index % gsi->cap.max_send_wr;
+}
+
+#define for_each_outstanding_wr(gsi, index) \
+       for (index = gsi->outstanding_ci; index != gsi->outstanding_pi; \
+            index = next_outstanding(gsi, index))
+
+/* Call with gsi->lock locked */
+static void generate_completions(struct mlx5_ib_gsi_qp *gsi)
+{
+       struct ib_cq *gsi_cq = gsi->ibqp.send_cq;
+       struct mlx5_ib_gsi_wr *wr;
+       u32 index;
+
+       for_each_outstanding_wr(gsi, index) {
+               wr = &gsi->outstanding_wrs[index];
+
+               if (!wr->completed)
+                       break;
+
+               if (gsi->sq_sig_type == IB_SIGNAL_ALL_WR ||
+                   wr->send_flags & IB_SEND_SIGNALED)
+                       WARN_ON_ONCE(mlx5_ib_generate_wc(gsi_cq, &wr->wc));
+
+               wr->completed = false;
+       }
+
+       gsi->outstanding_ci = index;
+}
+
+static void handle_single_completion(struct ib_cq *cq, struct ib_wc *wc)
+{
+       struct mlx5_ib_gsi_qp *gsi = cq->cq_context;
+       struct mlx5_ib_gsi_wr *wr =
+               container_of(wc->wr_cqe, struct mlx5_ib_gsi_wr, cqe);
+       u64 wr_id;
+       unsigned long flags;
+
+       spin_lock_irqsave(&gsi->lock, flags);
+       wr->completed = true;
+       wr_id = wr->wc.wr_id;
+       wr->wc = *wc;
+       wr->wc.wr_id = wr_id;
+       wr->wc.qp = &gsi->ibqp;
+
+       generate_completions(gsi);
+       spin_unlock_irqrestore(&gsi->lock, flags);
+}
+
+struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
+                                   struct ib_qp_init_attr *init_attr)
+{
+       struct mlx5_ib_dev *dev = to_mdev(pd->device);
+       struct mlx5_ib_gsi_qp *gsi;
+       struct ib_qp_init_attr hw_init_attr = *init_attr;
+       const u8 port_num = init_attr->port_num;
+       const int num_pkeys = pd->device->attrs.max_pkeys;
+       const int num_qps = mlx5_ib_deth_sqpn_cap(dev) ? num_pkeys : 0;
+       int ret;
+
+       mlx5_ib_dbg(dev, "creating GSI QP\n");
+
+       if (port_num > ARRAY_SIZE(dev->devr.ports) || port_num < 1) {
+               mlx5_ib_warn(dev,
+                            "invalid port number %d during GSI QP creation\n",
+                            port_num);
+               return ERR_PTR(-EINVAL);
+       }
+
+       gsi = kzalloc(sizeof(*gsi), GFP_KERNEL);
+       if (!gsi)
+               return ERR_PTR(-ENOMEM);
+
+       gsi->tx_qps = kcalloc(num_qps, sizeof(*gsi->tx_qps), GFP_KERNEL);
+       if (!gsi->tx_qps) {
+               ret = -ENOMEM;
+               goto err_free;
+       }
+
+       gsi->outstanding_wrs = kcalloc(init_attr->cap.max_send_wr,
+                                      sizeof(*gsi->outstanding_wrs),
+                                      GFP_KERNEL);
+       if (!gsi->outstanding_wrs) {
+               ret = -ENOMEM;
+               goto err_free_tx;
+       }
+
+       mutex_init(&gsi->mutex);
+
+       mutex_lock(&dev->devr.mutex);
+
+       if (dev->devr.ports[port_num - 1].gsi) {
+               mlx5_ib_warn(dev, "GSI QP already exists on port %d\n",
+                            port_num);
+               ret = -EBUSY;
+               goto err_free_wrs;
+       }
+       gsi->num_qps = num_qps;
+       spin_lock_init(&gsi->lock);
+
+       gsi->cap = init_attr->cap;
+       gsi->sq_sig_type = init_attr->sq_sig_type;
+       gsi->ibqp.qp_num = 1;
+       gsi->port_num = port_num;
+
+       gsi->cq = ib_alloc_cq(pd->device, gsi, init_attr->cap.max_send_wr, 0,
+                             IB_POLL_SOFTIRQ);
+       if (IS_ERR(gsi->cq)) {
+               mlx5_ib_warn(dev, "unable to create send CQ for GSI QP. error %ld\n",
+                            PTR_ERR(gsi->cq));
+               ret = PTR_ERR(gsi->cq);
+               goto err_free_wrs;
+       }
+
+       hw_init_attr.qp_type = MLX5_IB_QPT_HW_GSI;
+       hw_init_attr.send_cq = gsi->cq;
+       if (num_qps) {
+               hw_init_attr.cap.max_send_wr = 0;
+               hw_init_attr.cap.max_send_sge = 0;
+               hw_init_attr.cap.max_inline_data = 0;
+       }
+       gsi->rx_qp = ib_create_qp(pd, &hw_init_attr);
+       if (IS_ERR(gsi->rx_qp)) {
+               mlx5_ib_warn(dev, "unable to create hardware GSI QP. error %ld\n",
+                            PTR_ERR(gsi->rx_qp));
+               ret = PTR_ERR(gsi->rx_qp);
+               goto err_destroy_cq;
+       }
+
+       dev->devr.ports[init_attr->port_num - 1].gsi = gsi;
+
+       mutex_unlock(&dev->devr.mutex);
+
+       return &gsi->ibqp;
+
+err_destroy_cq:
+       ib_free_cq(gsi->cq);
+err_free_wrs:
+       mutex_unlock(&dev->devr.mutex);
+       kfree(gsi->outstanding_wrs);
+err_free_tx:
+       kfree(gsi->tx_qps);
+err_free:
+       kfree(gsi);
+       return ERR_PTR(ret);
+}
+
+int mlx5_ib_gsi_destroy_qp(struct ib_qp *qp)
+{
+       struct mlx5_ib_dev *dev = to_mdev(qp->device);
+       struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);
+       const int port_num = gsi->port_num;
+       int qp_index;
+       int ret;
+
+       mlx5_ib_dbg(dev, "destroying GSI QP\n");
+
+       mutex_lock(&dev->devr.mutex);
+       ret = ib_destroy_qp(gsi->rx_qp);
+       if (ret) {
+               mlx5_ib_warn(dev, "unable to destroy hardware GSI QP. error %d\n",
+                            ret);
+               mutex_unlock(&dev->devr.mutex);
+               return ret;
+       }
+       dev->devr.ports[port_num - 1].gsi = NULL;
+       mutex_unlock(&dev->devr.mutex);
+       gsi->rx_qp = NULL;
+
+       for (qp_index = 0; qp_index < gsi->num_qps; ++qp_index) {
+               if (!gsi->tx_qps[qp_index])
+                       continue;
+               WARN_ON_ONCE(ib_destroy_qp(gsi->tx_qps[qp_index]));
+               gsi->tx_qps[qp_index] = NULL;
+       }
+
+       ib_free_cq(gsi->cq);
+
+       kfree(gsi->outstanding_wrs);
+       kfree(gsi->tx_qps);
+       kfree(gsi);
+
+       return 0;
+}
+
+static struct ib_qp *create_gsi_ud_qp(struct mlx5_ib_gsi_qp *gsi)
+{
+       struct ib_pd *pd = gsi->rx_qp->pd;
+       struct ib_qp_init_attr init_attr = {
+               .event_handler = gsi->rx_qp->event_handler,
+               .qp_context = gsi->rx_qp->qp_context,
+               .send_cq = gsi->cq,
+               .recv_cq = gsi->rx_qp->recv_cq,
+               .cap = {
+                       .max_send_wr = gsi->cap.max_send_wr,
+                       .max_send_sge = gsi->cap.max_send_sge,
+                       .max_inline_data = gsi->cap.max_inline_data,
+               },
+               .sq_sig_type = gsi->sq_sig_type,
+               .qp_type = IB_QPT_UD,
+               .create_flags = mlx5_ib_create_qp_sqpn_qp1(),
+       };
+
+       return ib_create_qp(pd, &init_attr);
+}
+
+static int modify_to_rts(struct mlx5_ib_gsi_qp *gsi, struct ib_qp *qp,
+                        u16 qp_index)
+{
+       struct mlx5_ib_dev *dev = to_mdev(qp->device);
+       struct ib_qp_attr attr;
+       int mask;
+       int ret;
+
+       mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY | IB_QP_PORT;
+       attr.qp_state = IB_QPS_INIT;
+       attr.pkey_index = qp_index;
+       attr.qkey = IB_QP1_QKEY;
+       attr.port_num = gsi->port_num;
+       ret = ib_modify_qp(qp, &attr, mask);
+       if (ret) {
+               mlx5_ib_err(dev, "could not change QP%d state to INIT: %d\n",
+                           qp->qp_num, ret);
+               return ret;
+       }
+
+       attr.qp_state = IB_QPS_RTR;
+       ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
+       if (ret) {
+               mlx5_ib_err(dev, "could not change QP%d state to RTR: %d\n",
+                           qp->qp_num, ret);
+               return ret;
+       }
+
+       attr.qp_state = IB_QPS_RTS;
+       attr.sq_psn = 0;
+       ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN);
+       if (ret) {
+               mlx5_ib_err(dev, "could not change QP%d state to RTS: %d\n",
+                           qp->qp_num, ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static void setup_qp(struct mlx5_ib_gsi_qp *gsi, u16 qp_index)
+{
+       struct ib_device *device = gsi->rx_qp->device;
+       struct mlx5_ib_dev *dev = to_mdev(device);
+       struct ib_qp *qp;
+       unsigned long flags;
+       u16 pkey;
+       int ret;
+
+       ret = ib_query_pkey(device, gsi->port_num, qp_index, &pkey);
+       if (ret) {
+               mlx5_ib_warn(dev, "unable to read P_Key at port %d, index %d\n",
+                            gsi->port_num, qp_index);
+               return;
+       }
+
+       if (!pkey) {
+               mlx5_ib_dbg(dev, "invalid P_Key at port %d, index %d.  Skipping.\n",
+                           gsi->port_num, qp_index);
+               return;
+       }
+
+       spin_lock_irqsave(&gsi->lock, flags);
+       qp = gsi->tx_qps[qp_index];
+       spin_unlock_irqrestore(&gsi->lock, flags);
+       if (qp) {
+               mlx5_ib_dbg(dev, "already existing GSI TX QP at port %d, index %d. Skipping\n",
+                           gsi->port_num, qp_index);
+               return;
+       }
+
+       qp = create_gsi_ud_qp(gsi);
+       if (IS_ERR(qp)) {
+               mlx5_ib_warn(dev, "unable to create hardware UD QP for GSI: %ld\n",
+                            PTR_ERR(qp));
+               return;
+       }
+
+       ret = modify_to_rts(gsi, qp, qp_index);
+       if (ret)
+               goto err_destroy_qp;
+
+       spin_lock_irqsave(&gsi->lock, flags);
+       WARN_ON_ONCE(gsi->tx_qps[qp_index]);
+       gsi->tx_qps[qp_index] = qp;
+       spin_unlock_irqrestore(&gsi->lock, flags);
+
+       return;
+
+err_destroy_qp:
+       WARN_ON_ONCE(qp);
+}
+
+static void setup_qps(struct mlx5_ib_gsi_qp *gsi)
+{
+       u16 qp_index;
+
+       for (qp_index = 0; qp_index < gsi->num_qps; ++qp_index)
+               setup_qp(gsi, qp_index);
+}
+
+int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
+                         int attr_mask)
+{
+       struct mlx5_ib_dev *dev = to_mdev(qp->device);
+       struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);
+       int ret;
+
+       mlx5_ib_dbg(dev, "modifying GSI QP to state %d\n", attr->qp_state);
+
+       mutex_lock(&gsi->mutex);
+       ret = ib_modify_qp(gsi->rx_qp, attr, attr_mask);
+       if (ret) {
+               mlx5_ib_warn(dev, "unable to modify GSI rx QP: %d\n", ret);
+               goto unlock;
+       }
+
+       if (to_mqp(gsi->rx_qp)->state == IB_QPS_RTS)
+               setup_qps(gsi);
+
+unlock:
+       mutex_unlock(&gsi->mutex);
+
+       return ret;
+}
+
+int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
+                        int qp_attr_mask,
+                        struct ib_qp_init_attr *qp_init_attr)
+{
+       struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);
+       int ret;
+
+       mutex_lock(&gsi->mutex);
+       ret = ib_query_qp(gsi->rx_qp, qp_attr, qp_attr_mask, qp_init_attr);
+       qp_init_attr->cap = gsi->cap;
+       mutex_unlock(&gsi->mutex);
+
+       return ret;
+}
+
+/* Call with gsi->lock locked */
+static int mlx5_ib_add_outstanding_wr(struct mlx5_ib_gsi_qp *gsi,
+                                     struct ib_ud_wr *wr, struct ib_wc *wc)
+{
+       struct mlx5_ib_dev *dev = to_mdev(gsi->rx_qp->device);
+       struct mlx5_ib_gsi_wr *gsi_wr;
+
+       if (gsi->outstanding_pi == gsi->outstanding_ci + gsi->cap.max_send_wr) {
+               mlx5_ib_warn(dev, "no available GSI work request.\n");
+               return -ENOMEM;
+       }
+
+       gsi_wr = &gsi->outstanding_wrs[gsi->outstanding_pi];
+       gsi->outstanding_pi = next_outstanding(gsi, gsi->outstanding_pi);
+
+       if (!wc) {
+               memset(&gsi_wr->wc, 0, sizeof(gsi_wr->wc));
+               gsi_wr->wc.pkey_index = wr->pkey_index;
+               gsi_wr->wc.wr_id = wr->wr.wr_id;
+       } else {
+               gsi_wr->wc = *wc;
+               gsi_wr->completed = true;
+       }
+
+       gsi_wr->cqe.done = &handle_single_completion;
+       wr->wr.wr_cqe = &gsi_wr->cqe;
+
+       return 0;
+}
+
+/* Call with gsi->lock locked */
+static int mlx5_ib_gsi_silent_drop(struct mlx5_ib_gsi_qp *gsi,
+                                   struct ib_ud_wr *wr)
+{
+       struct ib_wc wc = {
+               { .wr_id = wr->wr.wr_id },
+               .status = IB_WC_SUCCESS,
+               .opcode = IB_WC_SEND,
+               .qp = &gsi->ibqp,
+       };
+       int ret;
+
+       ret = mlx5_ib_add_outstanding_wr(gsi, wr, &wc);
+       if (ret)
+               return ret;
+
+       generate_completions(gsi);
+
+       return 0;
+}
+
+/* Call with gsi->lock locked */
+static struct ib_qp *get_tx_qp(struct mlx5_ib_gsi_qp *gsi, struct ib_ud_wr *wr)
+{
+       struct mlx5_ib_dev *dev = to_mdev(gsi->rx_qp->device);
+       int qp_index = wr->pkey_index;
+
+       if (!mlx5_ib_deth_sqpn_cap(dev))
+               return gsi->rx_qp;
+
+       if (qp_index >= gsi->num_qps)
+               return NULL;
+
+       return gsi->tx_qps[qp_index];
+}
+
+int mlx5_ib_gsi_post_send(struct ib_qp *qp, struct ib_send_wr *wr,
+                         struct ib_send_wr **bad_wr)
+{
+       struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);
+       struct ib_qp *tx_qp;
+       unsigned long flags;
+       int ret;
+
+       for (; wr; wr = wr->next) {
+               struct ib_ud_wr cur_wr = *ud_wr(wr);
+
+               cur_wr.wr.next = NULL;
+
+               spin_lock_irqsave(&gsi->lock, flags);
+               tx_qp = get_tx_qp(gsi, &cur_wr);
+               if (!tx_qp) {
+                       ret = mlx5_ib_gsi_silent_drop(gsi, &cur_wr);
+                       if (ret)
+                               goto err;
+                       spin_unlock_irqrestore(&gsi->lock, flags);
+                       continue;
+               }
+
+               ret = mlx5_ib_add_outstanding_wr(gsi, &cur_wr, NULL);
+               if (ret)
+                       goto err;
+
+               ret = ib_post_send(tx_qp, &cur_wr.wr, bad_wr);
+               if (ret) {
+                       /* Undo the effect of adding the outstanding wr */
+                       gsi->outstanding_pi = (gsi->outstanding_pi - 1) %
+                                             gsi->cap.max_send_wr;
+                       goto err;
+               }
+               spin_unlock_irqrestore(&gsi->lock, flags);
+       }
+
+       return 0;
+
+err:
+       spin_unlock_irqrestore(&gsi->lock, flags);
+       *bad_wr = wr;
+       return ret;
+}
+
+int mlx5_ib_gsi_post_recv(struct ib_qp *qp, struct ib_recv_wr *wr,
+                         struct ib_recv_wr **bad_wr)
+{
+       struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);
+
+       return ib_post_recv(gsi->rx_qp, wr, bad_wr);
+}
+
+void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi)
+{
+       if (!gsi)
+               return;
+
+       mutex_lock(&gsi->mutex);
+       setup_qps(gsi);
+       mutex_unlock(&gsi->mutex);
+}
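
gsi.c implements the new software GSI wrapper: a single QP with qp_num 1 is exposed, and when the device reports set_deth_sqpn, each send is demultiplexed by its P_Key index onto a per-P_Key UD TX QP while the outstanding_wrs ring re-orders completions back into submission order. A hedged sketch of posting through the wrapper, assuming a hypothetical helper plus caller-supplied SGE, address handle and remote QPN; the pkey_index routing is what get_tx_qp() above implements:

static int example_gsi_send(struct ib_qp *gsi_qp, struct ib_ah *ah,
			    u32 remote_qpn, u16 pkey_index,
			    struct ib_sge *sge)
{
	struct ib_send_wr *bad_wr;
	struct ib_ud_wr ud_wr = {
		.wr = {
			.wr_id      = 1,
			.sg_list    = sge,
			.num_sge    = 1,
			.opcode     = IB_WR_SEND,
			.send_flags = IB_SEND_SIGNALED,
		},
		.ah          = ah,
		.remote_qpn  = remote_qpn,
		.remote_qkey = IB_QP1_QKEY,
		/* Selects gsi->tx_qps[pkey_index] in get_tx_qp() */
		.pkey_index  = pkey_index,
	};

	/* Normally reached through the driver's ib_post_send path */
	return mlx5_ib_gsi_post_send(gsi_qp, &ud_wr.wr, &bad_wr);
}
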
diff --git a/drivers/infiniband/hw/mlx5/ib_virt.c b/drivers/infiniband/hw/mlx5/ib_virt.c
new file mode 100644 (file)
index 0000000..c1b9de8
--- /dev/null
@@ -0,0 +1,194 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/mlx5/vport.h>
+#include "mlx5_ib.h"
+
+static inline u32 mlx_to_net_policy(enum port_state_policy mlx_policy)
+{
+       switch (mlx_policy) {
+       case MLX5_POLICY_DOWN:
+               return IFLA_VF_LINK_STATE_DISABLE;
+       case MLX5_POLICY_UP:
+               return IFLA_VF_LINK_STATE_ENABLE;
+       case MLX5_POLICY_FOLLOW:
+               return IFLA_VF_LINK_STATE_AUTO;
+       default:
+               return __IFLA_VF_LINK_STATE_MAX;
+       }
+}
+
+int mlx5_ib_get_vf_config(struct ib_device *device, int vf, u8 port,
+                         struct ifla_vf_info *info)
+{
+       struct mlx5_ib_dev *dev = to_mdev(device);
+       struct mlx5_core_dev *mdev = dev->mdev;
+       struct mlx5_hca_vport_context *rep;
+       int err;
+
+       rep = kzalloc(sizeof(*rep), GFP_KERNEL);
+       if (!rep)
+               return -ENOMEM;
+
+       err = mlx5_query_hca_vport_context(mdev, 1, 1,  vf + 1, rep);
+       if (err) {
+               mlx5_ib_warn(dev, "failed to query port policy for vf %d (%d)\n",
+                            vf, err);
+               goto free;
+       }
+       memset(info, 0, sizeof(*info));
+       info->linkstate = mlx_to_net_policy(rep->policy);
+       if (info->linkstate == __IFLA_VF_LINK_STATE_MAX)
+               err = -EINVAL;
+
+free:
+       kfree(rep);
+       return err;
+}
+
+static inline enum port_state_policy net_to_mlx_policy(int policy)
+{
+       switch (policy) {
+       case IFLA_VF_LINK_STATE_DISABLE:
+               return MLX5_POLICY_DOWN;
+       case IFLA_VF_LINK_STATE_ENABLE:
+               return MLX5_POLICY_UP;
+       case IFLA_VF_LINK_STATE_AUTO:
+               return MLX5_POLICY_FOLLOW;
+       default:
+               return MLX5_POLICY_INVALID;
+       }
+}
+
+int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
+                             u8 port, int state)
+{
+       struct mlx5_ib_dev *dev = to_mdev(device);
+       struct mlx5_core_dev *mdev = dev->mdev;
+       struct mlx5_hca_vport_context *in;
+       int err;
+
+       in = kzalloc(sizeof(*in), GFP_KERNEL);
+       if (!in)
+               return -ENOMEM;
+
+       in->policy = net_to_mlx_policy(state);
+       if (in->policy == MLX5_POLICY_INVALID) {
+               err = -EINVAL;
+               goto out;
+       }
+       in->field_select = MLX5_HCA_VPORT_SEL_STATE_POLICY;
+       err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in);
+
+out:
+       kfree(in);
+       return err;
+}
+
+int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
+                        u8 port, struct ifla_vf_stats *stats)
+{
+       int out_sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
+       struct mlx5_core_dev *mdev;
+       struct mlx5_ib_dev *dev;
+       void *out;
+       int err;
+
+       dev = to_mdev(device);
+       mdev = dev->mdev;
+
+       out = kzalloc(out_sz, GFP_KERNEL);
+       if (!out)
+               return -ENOMEM;
+
+       err = mlx5_core_query_vport_counter(mdev, true, vf, port, out, out_sz);
+       if (err)
+               goto ex;
+
+       stats->rx_packets = MLX5_GET64_PR(query_vport_counter_out, out, received_ib_unicast.packets);
+       stats->tx_packets = MLX5_GET64_PR(query_vport_counter_out, out, transmitted_ib_unicast.packets);
+       stats->rx_bytes = MLX5_GET64_PR(query_vport_counter_out, out, received_ib_unicast.octets);
+       stats->tx_bytes = MLX5_GET64_PR(query_vport_counter_out, out, transmitted_ib_unicast.octets);
+       stats->multicast = MLX5_GET64_PR(query_vport_counter_out, out, received_ib_multicast.packets);
+
+ex:
+       kfree(out);
+       return err;
+}
+
+static int set_vf_node_guid(struct ib_device *device, int vf, u8 port, u64 guid)
+{
+       struct mlx5_ib_dev *dev = to_mdev(device);
+       struct mlx5_core_dev *mdev = dev->mdev;
+       struct mlx5_hca_vport_context *in;
+       int err;
+
+       in = kzalloc(sizeof(*in), GFP_KERNEL);
+       if (!in)
+               return -ENOMEM;
+
+       in->field_select = MLX5_HCA_VPORT_SEL_NODE_GUID;
+       in->node_guid = guid;
+       err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in);
+       kfree(in);
+       return err;
+}
+
+static int set_vf_port_guid(struct ib_device *device, int vf, u8 port, u64 guid)
+{
+       struct mlx5_ib_dev *dev = to_mdev(device);
+       struct mlx5_core_dev *mdev = dev->mdev;
+       struct mlx5_hca_vport_context *in;
+       int err;
+
+       in = kzalloc(sizeof(*in), GFP_KERNEL);
+       if (!in)
+               return -ENOMEM;
+
+       in->field_select = MLX5_HCA_VPORT_SEL_PORT_GUID;
+       in->port_guid = guid;
+       err = mlx5_core_modify_hca_vport_context(mdev, 1, 1, vf + 1, in);
+       kfree(in);
+       return err;
+}
+
+int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
+                       u64 guid, int type)
+{
+       if (type == IFLA_VF_IB_NODE_GUID)
+               return set_vf_node_guid(device, vf, port, guid);
+       else if (type == IFLA_VF_IB_PORT_GUID)
+               return set_vf_port_guid(device, vf, port, guid);
+
+       return -EINVAL;
+}
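
ib_virt.c adds the SR-IOV callbacks by translating between rtnetlink IFLA_VF_* link-state policies and the mlx5 HCA vport context. A small sketch of driving them directly, assuming a hypothetical helper and an existing ib_device pointer; in the driver these are reached through the ib_device ops wired up in main.c further down:

static void example_vf_admin(struct ib_device *ibdev)
{
	struct ifla_vf_info info;

	/* Put VF 0 on port 1 into the "follow the PF" link-state policy */
	if (mlx5_ib_set_vf_link_state(ibdev, 0, 1, IFLA_VF_LINK_STATE_AUTO))
		return;

	/* Read the policy back; linkstate holds an IFLA_VF_LINK_STATE_* value */
	if (!mlx5_ib_get_vf_config(ibdev, 0, 1, &info))
		pr_info("VF0 link state policy: %u\n", info.linkstate);
}
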
index b84d13a487cc04dcfbb501cfbd460c335bf91f8d..1534af1130588f03b5be1f4fae333d4277a02486 100644 (file)
  */
 
 #include <linux/mlx5/cmd.h>
+#include <linux/mlx5/vport.h>
 #include <rdma/ib_mad.h>
 #include <rdma/ib_smi.h>
+#include <rdma/ib_pma.h>
 #include "mlx5_ib.h"
 
 enum {
@@ -57,20 +59,12 @@ int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
        return mlx5_core_mad_ifc(dev->mdev, in_mad, response_mad, op_modifier, port);
 }
 
-int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
-                       const struct ib_wc *in_wc, const struct ib_grh *in_grh,
-                       const struct ib_mad_hdr *in, size_t in_mad_size,
-                       struct ib_mad_hdr *out, size_t *out_mad_size,
-                       u16 *out_mad_pkey_index)
+static int process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+                      const struct ib_wc *in_wc, const struct ib_grh *in_grh,
+                      const struct ib_mad *in_mad, struct ib_mad *out_mad)
 {
        u16 slid;
        int err;
-       const struct ib_mad *in_mad = (const struct ib_mad *)in;
-       struct ib_mad *out_mad = (struct ib_mad *)out;
-
-       if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
-                        *out_mad_size != sizeof(*out_mad)))
-               return IB_MAD_RESULT_FAILURE;
 
        slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
 
@@ -117,6 +111,156 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
        return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
 }
 
+static void pma_cnt_ext_assign(struct ib_pma_portcounters_ext *pma_cnt_ext,
+                              void *out)
+{
+#define MLX5_SUM_CNT(p, cntr1, cntr2)  \
+       (MLX5_GET64(query_vport_counter_out, p, cntr1) + \
+       MLX5_GET64(query_vport_counter_out, p, cntr2))
+
+       pma_cnt_ext->port_xmit_data =
+               cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.octets,
+                                        transmitted_ib_multicast.octets) >> 2);
+       pma_cnt_ext->port_rcv_data =
+               cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.octets,
+                                        received_ib_multicast.octets) >> 2);
+       pma_cnt_ext->port_xmit_packets =
+               cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.packets,
+                                        transmitted_ib_multicast.packets));
+       pma_cnt_ext->port_rcv_packets =
+               cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.packets,
+                                        received_ib_multicast.packets));
+       pma_cnt_ext->port_unicast_xmit_packets =
+               MLX5_GET64_BE(query_vport_counter_out,
+                             out, transmitted_ib_unicast.packets);
+       pma_cnt_ext->port_unicast_rcv_packets =
+               MLX5_GET64_BE(query_vport_counter_out,
+                             out, received_ib_unicast.packets);
+       pma_cnt_ext->port_multicast_xmit_packets =
+               MLX5_GET64_BE(query_vport_counter_out,
+                             out, transmitted_ib_multicast.packets);
+       pma_cnt_ext->port_multicast_rcv_packets =
+               MLX5_GET64_BE(query_vport_counter_out,
+                             out, received_ib_multicast.packets);
+}
+
+static void pma_cnt_assign(struct ib_pma_portcounters *pma_cnt,
+                          void *out)
+{
+       /* Traffic counters will be reported in
+        * their 64bit form via ib_pma_portcounters_ext by default.
+        */
+       void *out_pma = MLX5_ADDR_OF(ppcnt_reg, out,
+                                    counter_set);
+
+#define MLX5_ASSIGN_PMA_CNTR(counter_var, counter_name)        {               \
+       counter_var = MLX5_GET_BE(typeof(counter_var),                  \
+                                 ib_port_cntrs_grp_data_layout,        \
+                                 out_pma, counter_name);               \
+       }
+
+       MLX5_ASSIGN_PMA_CNTR(pma_cnt->symbol_error_counter,
+                            symbol_error_counter);
+       MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_error_recovery_counter,
+                            link_error_recovery_counter);
+       MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_downed_counter,
+                            link_downed_counter);
+       MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_errors,
+                            port_rcv_errors);
+       MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_remphys_errors,
+                            port_rcv_remote_physical_errors);
+       MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_switch_relay_errors,
+                            port_rcv_switch_relay_errors);
+       MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_xmit_discards,
+                            port_xmit_discards);
+       MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_xmit_constraint_errors,
+                            port_xmit_constraint_errors);
+       MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_constraint_errors,
+                            port_rcv_constraint_errors);
+       MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_overrun_errors,
+                            link_overrun_errors);
+       MLX5_ASSIGN_PMA_CNTR(pma_cnt->vl15_dropped,
+                            vl_15_dropped);
+}
+
+static int process_pma_cmd(struct ib_device *ibdev, u8 port_num,
+                          const struct ib_mad *in_mad, struct ib_mad *out_mad)
+{
+       struct mlx5_ib_dev *dev = to_mdev(ibdev);
+       int err;
+       void *out_cnt;
+
+       /* Declaring support of extended counters */
+       if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) {
+               struct ib_class_port_info cpi = {};
+
+               cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
+               memcpy((out_mad->data + 40), &cpi, sizeof(cpi));
+               return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+       }
+
+       if (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT) {
+               struct ib_pma_portcounters_ext *pma_cnt_ext =
+                       (struct ib_pma_portcounters_ext *)(out_mad->data + 40);
+               int sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
+
+               out_cnt = mlx5_vzalloc(sz);
+               if (!out_cnt)
+                       return IB_MAD_RESULT_FAILURE;
+
+               err = mlx5_core_query_vport_counter(dev->mdev, 0, 0,
+                                                   port_num, out_cnt, sz);
+               if (!err)
+                       pma_cnt_ext_assign(pma_cnt_ext, out_cnt);
+       } else {
+               struct ib_pma_portcounters *pma_cnt =
+                       (struct ib_pma_portcounters *)(out_mad->data + 40);
+               int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+
+               out_cnt = mlx5_vzalloc(sz);
+               if (!out_cnt)
+                       return IB_MAD_RESULT_FAILURE;
+
+               err = mlx5_core_query_ib_ppcnt(dev->mdev, port_num,
+                                              out_cnt, sz);
+               if (!err)
+                       pma_cnt_assign(pma_cnt, out_cnt);
+       }
+
+       kvfree(out_cnt);
+       if (err)
+               return IB_MAD_RESULT_FAILURE;
+
+       return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+}
+
+int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+                       const struct ib_wc *in_wc, const struct ib_grh *in_grh,
+                       const struct ib_mad_hdr *in, size_t in_mad_size,
+                       struct ib_mad_hdr *out, size_t *out_mad_size,
+                       u16 *out_mad_pkey_index)
+{
+       struct mlx5_ib_dev *dev = to_mdev(ibdev);
+       struct mlx5_core_dev *mdev = dev->mdev;
+       const struct ib_mad *in_mad = (const struct ib_mad *)in;
+       struct ib_mad *out_mad = (struct ib_mad *)out;
+
+       if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
+                        *out_mad_size != sizeof(*out_mad)))
+               return IB_MAD_RESULT_FAILURE;
+
+       memset(out_mad->data, 0, sizeof(out_mad->data));
+
+       if (MLX5_CAP_GEN(mdev, vport_counters) &&
+           in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
+           in_mad->mad_hdr.method == IB_MGMT_METHOD_GET) {
+               return process_pma_cmd(ibdev, port_num, in_mad, out_mad);
+       } else {
+               return process_mad(ibdev, mad_flags, port_num, in_wc, in_grh,
+                                  in_mad, out_mad);
+       }
+}
+
 int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
 {
        struct ib_smp *in_mad  = NULL;
index 03c418ccbc982e4114681044798c075a65357b28..e305990b73f6b78bc49f5213d7678e2b859cf18a 100644 (file)
@@ -283,7 +283,7 @@ __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
 
 static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
 {
-       return !dev->mdev->issi;
+       return !MLX5_CAP_GEN(dev->mdev, ib_virt);
 }
 
 enum {
@@ -487,6 +487,13 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
                props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
        if (MLX5_CAP_GEN(mdev, xrc))
                props->device_cap_flags |= IB_DEVICE_XRC;
+       if (MLX5_CAP_GEN(mdev, imaicl)) {
+               props->device_cap_flags |= IB_DEVICE_MEM_WINDOW |
+                                          IB_DEVICE_MEM_WINDOW_TYPE_2B;
+               props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
+               /* We support 'Gappy' memory registration too */
+               props->device_cap_flags |= IB_DEVICE_SG_GAPS_REG;
+       }
        props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
        if (MLX5_CAP_GEN(mdev, sho)) {
                props->device_cap_flags |= IB_DEVICE_SIGNATURE_HANDOVER;
@@ -504,6 +511,11 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
            (MLX5_CAP_ETH(dev->mdev, csum_cap)))
                        props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
 
+       if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
+               props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
+               props->device_cap_flags |= IB_DEVICE_UD_TSO;
+       }
+
        props->vendor_part_id      = mdev->pdev->device;
        props->hw_ver              = mdev->pdev->revision;
 
@@ -529,7 +541,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
        props->local_ca_ack_delay  = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
        props->max_res_rd_atom     = props->max_qp_rd_atom * props->max_qp;
        props->max_srq_sge         = max_rq_sg - 1;
-       props->max_fast_reg_page_list_len = (unsigned int)-1;
+       props->max_fast_reg_page_list_len =
+               1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
        get_atomic_caps(dev, props);
        props->masked_atomic_cap   = IB_ATOMIC_NONE;
        props->max_mcast_grp       = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
@@ -549,6 +562,9 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
        if (MLX5_CAP_GEN(mdev, cd))
                props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL;
 
+       if (!mlx5_core_is_pf(mdev))
+               props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION;
+
        return 0;
 }
 
@@ -686,6 +702,7 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
        props->qkey_viol_cntr   = rep->qkey_violation_counter;
        props->subnet_timeout   = rep->subnet_timeout;
        props->init_type_reply  = rep->init_type_reply;
+       props->grh_required     = rep->grh_required;
 
        err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port);
        if (err)
@@ -1369,11 +1386,20 @@ static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
        return 0;
 }
 
+static int ib_prio_to_core_prio(unsigned int priority, bool dont_trap)
+{
+       priority *= 2;
+       if (!dont_trap)
+               priority++;
+       return priority;
+}
+
 #define MLX5_FS_MAX_TYPES       10
 #define MLX5_FS_MAX_ENTRIES     32000UL
 static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
                                                struct ib_flow_attr *flow_attr)
 {
+       bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP;
        struct mlx5_flow_namespace *ns = NULL;
        struct mlx5_ib_flow_prio *prio;
        struct mlx5_flow_table *ft;
@@ -1383,10 +1409,12 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
        int err = 0;
 
        if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
-               if (flow_is_multicast_only(flow_attr))
+               if (flow_is_multicast_only(flow_attr) &&
+                   !dont_trap)
                        priority = MLX5_IB_FLOW_MCAST_PRIO;
                else
-                       priority = flow_attr->priority;
+                       priority = ib_prio_to_core_prio(flow_attr->priority,
+                                                       dont_trap);
                ns = mlx5_get_flow_namespace(dev->mdev,
                                             MLX5_FLOW_NAMESPACE_BYPASS);
                num_entries = MLX5_FS_MAX_ENTRIES;
@@ -1434,6 +1462,7 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
        unsigned int spec_index;
        u32 *match_c;
        u32 *match_v;
+       u32 action;
        int err = 0;
 
        if (!is_valid_attr(flow_attr))
@@ -1459,9 +1488,11 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
 
        /* Outer header support only */
        match_criteria_enable = (!outer_header_zero(match_c)) << 0;
+       action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
+               MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
        handler->rule = mlx5_add_flow_rule(ft, match_criteria_enable,
                                           match_c, match_v,
-                                          MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+                                          action,
                                           MLX5_FS_DEFAULT_FLOW_TAG,
                                           dst);
 
@@ -1481,6 +1512,29 @@ free:
        return err ? ERR_PTR(err) : handler;
 }
 
+static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *dev,
+                                                         struct mlx5_ib_flow_prio *ft_prio,
+                                                         struct ib_flow_attr *flow_attr,
+                                                         struct mlx5_flow_destination *dst)
+{
+       struct mlx5_ib_flow_handler *handler_dst = NULL;
+       struct mlx5_ib_flow_handler *handler = NULL;
+
+       handler = create_flow_rule(dev, ft_prio, flow_attr, NULL);
+       if (!IS_ERR(handler)) {
+               handler_dst = create_flow_rule(dev, ft_prio,
+                                              flow_attr, dst);
+               if (IS_ERR(handler_dst)) {
+                       mlx5_del_flow_rule(handler->rule);
+                       kfree(handler);
+                       handler = handler_dst;
+               } else {
+                       list_add(&handler_dst->list, &handler->list);
+               }
+       }
+
+       return handler;
+}
 enum {
        LEFTOVERS_MC,
        LEFTOVERS_UC,
@@ -1558,7 +1612,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
 
        if (domain != IB_FLOW_DOMAIN_USER ||
            flow_attr->port > MLX5_CAP_GEN(dev->mdev, num_ports) ||
-           flow_attr->flags)
+           (flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP))
                return ERR_PTR(-EINVAL);
 
        dst = kzalloc(sizeof(*dst), GFP_KERNEL);
@@ -1577,8 +1631,13 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
        dst->tir_num = to_mqp(qp)->raw_packet_qp.rq.tirn;
 
        if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
-               handler = create_flow_rule(dev, ft_prio, flow_attr,
-                                          dst);
+               if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)  {
+                       handler = create_dont_trap_rule(dev, ft_prio,
+                                                       flow_attr, dst);
+               } else {
+                       handler = create_flow_rule(dev, ft_prio, flow_attr,
+                                                  dst);
+               }
        } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
                   flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
                handler = create_leftovers_rule(dev, ft_prio, flow_attr,
@@ -1716,6 +1775,17 @@ static struct device_attribute *mlx5_class_attributes[] = {
        &dev_attr_reg_pages,
 };
 
+static void pkey_change_handler(struct work_struct *work)
+{
+       struct mlx5_ib_port_resources *ports =
+               container_of(work, struct mlx5_ib_port_resources,
+                            pkey_change_work);
+
+       mutex_lock(&ports->devr->mutex);
+       mlx5_ib_gsi_pkey_change(ports->gsi);
+       mutex_unlock(&ports->devr->mutex);
+}
+
 static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
                          enum mlx5_dev_event event, unsigned long param)
 {
@@ -1752,6 +1822,8 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
        case MLX5_DEV_EVENT_PKEY_CHANGE:
                ibev.event = IB_EVENT_PKEY_CHANGE;
                port = (u8)param;
+
+               schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
                break;
 
        case MLX5_DEV_EVENT_GUID_CHANGE:
@@ -1838,7 +1910,7 @@ static void destroy_umrc_res(struct mlx5_ib_dev *dev)
                mlx5_ib_warn(dev, "mr cache cleanup failed\n");
 
        mlx5_ib_destroy_qp(dev->umrc.qp);
-       ib_destroy_cq(dev->umrc.cq);
+       ib_free_cq(dev->umrc.cq);
        ib_dealloc_pd(dev->umrc.pd);
 }
 
@@ -1853,7 +1925,6 @@ static int create_umr_res(struct mlx5_ib_dev *dev)
        struct ib_pd *pd;
        struct ib_cq *cq;
        struct ib_qp *qp;
-       struct ib_cq_init_attr cq_attr = {};
        int ret;
 
        attr = kzalloc(sizeof(*attr), GFP_KERNEL);
@@ -1870,15 +1941,12 @@ static int create_umr_res(struct mlx5_ib_dev *dev)
                goto error_0;
        }
 
-       cq_attr.cqe = 128;
-       cq = ib_create_cq(&dev->ib_dev, mlx5_umr_cq_handler, NULL, NULL,
-                         &cq_attr);
+       cq = ib_alloc_cq(&dev->ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
        if (IS_ERR(cq)) {
                mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n");
                ret = PTR_ERR(cq);
                goto error_2;
        }
-       ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 
        init_attr->send_cq = cq;
        init_attr->recv_cq = cq;
@@ -1945,7 +2013,7 @@ error_4:
        mlx5_ib_destroy_qp(qp);
 
 error_3:
-       ib_destroy_cq(cq);
+       ib_free_cq(cq);
 
 error_2:
        ib_dealloc_pd(pd);
@@ -1961,10 +2029,13 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
        struct ib_srq_init_attr attr;
        struct mlx5_ib_dev *dev;
        struct ib_cq_init_attr cq_attr = {.cqe = 1};
+       int port;
        int ret = 0;
 
        dev = container_of(devr, struct mlx5_ib_dev, devr);
 
+       mutex_init(&devr->mutex);
+
        devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL);
        if (IS_ERR(devr->p0)) {
                ret = PTR_ERR(devr->p0);
@@ -2052,6 +2123,12 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
        atomic_inc(&devr->p0->usecnt);
        atomic_set(&devr->s0->usecnt, 0);
 
+       for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) {
+               INIT_WORK(&devr->ports[port].pkey_change_work,
+                         pkey_change_handler);
+               devr->ports[port].devr = devr;
+       }
+
        return 0;
 
 error5:
@@ -2070,12 +2147,20 @@ error0:
 
 static void destroy_dev_resources(struct mlx5_ib_resources *devr)
 {
+       struct mlx5_ib_dev *dev =
+               container_of(devr, struct mlx5_ib_dev, devr);
+       int port;
+
        mlx5_ib_destroy_srq(devr->s1);
        mlx5_ib_destroy_srq(devr->s0);
        mlx5_ib_dealloc_xrcd(devr->x0);
        mlx5_ib_dealloc_xrcd(devr->x1);
        mlx5_ib_destroy_cq(devr->c0);
        mlx5_ib_dealloc_pd(devr->p0);
+
+       /* Make sure no P_Key change work items are still executing */
+       for (port = 0; port < dev->num_ports; ++port)
+               cancel_work_sync(&devr->ports[port].pkey_change_work);
 }
 
 static u32 get_core_cap_flags(struct ib_device *ibdev)
@@ -2198,6 +2283,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
                (1ull << IB_USER_VERBS_CMD_ALLOC_PD)            |
                (1ull << IB_USER_VERBS_CMD_DEALLOC_PD)          |
                (1ull << IB_USER_VERBS_CMD_REG_MR)              |
+               (1ull << IB_USER_VERBS_CMD_REREG_MR)            |
                (1ull << IB_USER_VERBS_CMD_DEREG_MR)            |
                (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
                (1ull << IB_USER_VERBS_CMD_CREATE_CQ)           |
@@ -2258,6 +2344,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
        dev->ib_dev.req_notify_cq       = mlx5_ib_arm_cq;
        dev->ib_dev.get_dma_mr          = mlx5_ib_get_dma_mr;
        dev->ib_dev.reg_user_mr         = mlx5_ib_reg_user_mr;
+       dev->ib_dev.rereg_user_mr       = mlx5_ib_rereg_user_mr;
        dev->ib_dev.dereg_mr            = mlx5_ib_dereg_mr;
        dev->ib_dev.attach_mcast        = mlx5_ib_mcg_attach;
        dev->ib_dev.detach_mcast        = mlx5_ib_mcg_detach;
@@ -2266,9 +2353,23 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
        dev->ib_dev.map_mr_sg           = mlx5_ib_map_mr_sg;
        dev->ib_dev.check_mr_status     = mlx5_ib_check_mr_status;
        dev->ib_dev.get_port_immutable  = mlx5_port_immutable;
+       if (mlx5_core_is_pf(mdev)) {
+               dev->ib_dev.get_vf_config       = mlx5_ib_get_vf_config;
+               dev->ib_dev.set_vf_link_state   = mlx5_ib_set_vf_link_state;
+               dev->ib_dev.get_vf_stats        = mlx5_ib_get_vf_stats;
+               dev->ib_dev.set_vf_guid         = mlx5_ib_set_vf_guid;
+       }
 
        mlx5_ib_internal_fill_odp_caps(dev);
 
+       if (MLX5_CAP_GEN(mdev, imaicl)) {
+               dev->ib_dev.alloc_mw            = mlx5_ib_alloc_mw;
+               dev->ib_dev.dealloc_mw          = mlx5_ib_dealloc_mw;
+               dev->ib_dev.uverbs_cmd_mask |=
+                       (1ull << IB_USER_VERBS_CMD_ALLOC_MW)    |
+                       (1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
+       }
+
        if (MLX5_CAP_GEN(mdev, xrc)) {
                dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
                dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
index d2b9737baa3675b1dc9ced794767f1790ada35ee..f16c818ad2e62f6cd507b80b096a06d3c0e73164 100644 (file)
@@ -43,6 +43,7 @@
 #include <linux/mlx5/srq.h>
 #include <linux/types.h>
 #include <linux/mlx5/transobj.h>
+#include <rdma/ib_user_verbs.h>
 
 #define mlx5_ib_dbg(dev, format, arg...)                               \
 pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,   \
@@ -126,7 +127,7 @@ struct mlx5_ib_pd {
 };
 
 #define MLX5_IB_FLOW_MCAST_PRIO                (MLX5_BY_PASS_NUM_PRIOS - 1)
-#define MLX5_IB_FLOW_LAST_PRIO         (MLX5_IB_FLOW_MCAST_PRIO - 1)
+#define MLX5_IB_FLOW_LAST_PRIO         (MLX5_BY_PASS_NUM_REGULAR_PRIOS - 1)
 #if (MLX5_IB_FLOW_LAST_PRIO <= 0)
 #error "Invalid number of bypass priorities"
 #endif
@@ -162,9 +163,31 @@ struct mlx5_ib_flow_db {
 #define MLX5_IB_SEND_UMR_UNREG IB_SEND_RESERVED_START
 #define MLX5_IB_SEND_UMR_FAIL_IF_FREE (IB_SEND_RESERVED_START << 1)
 #define MLX5_IB_SEND_UMR_UPDATE_MTT (IB_SEND_RESERVED_START << 2)
+
+#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION    (IB_SEND_RESERVED_START << 3)
+#define MLX5_IB_SEND_UMR_UPDATE_PD             (IB_SEND_RESERVED_START << 4)
+#define MLX5_IB_SEND_UMR_UPDATE_ACCESS         IB_SEND_RESERVED_END
+
 #define MLX5_IB_QPT_REG_UMR    IB_QPT_RESERVED1
+/*
+ * IB_QPT_GSI creates the software wrapper around GSI, and MLX5_IB_QPT_HW_GSI
+ * creates the actual hardware QP.
+ */
+#define MLX5_IB_QPT_HW_GSI     IB_QPT_RESERVED2
 #define MLX5_IB_WR_UMR         IB_WR_RESERVED1
 
+/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
+ *
+ * These flags are intended for internal use by the mlx5_ib driver, and they
+ * rely on the range reserved for that use in the ib_qp_create_flags enum.
+ */
+
+/* Create a UD QP whose source QP number is 1 */
+static inline enum ib_qp_create_flags mlx5_ib_create_qp_sqpn_qp1(void)
+{
+       return IB_QP_CREATE_RESERVED_START;
+}
+
 struct wr_list {
        u16     opcode;
        u16     next;
@@ -325,11 +348,14 @@ struct mlx5_ib_cq_buf {
 };
 
 enum mlx5_ib_qp_flags {
-       MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK     = 1 << 0,
-       MLX5_IB_QP_SIGNATURE_HANDLING           = 1 << 1,
-       MLX5_IB_QP_CROSS_CHANNEL                = 1 << 2,
-       MLX5_IB_QP_MANAGED_SEND                 = 1 << 3,
-       MLX5_IB_QP_MANAGED_RECV                 = 1 << 4,
+       MLX5_IB_QP_LSO                          = IB_QP_CREATE_IPOIB_UD_LSO,
+       MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK     = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
+       MLX5_IB_QP_CROSS_CHANNEL                = IB_QP_CREATE_CROSS_CHANNEL,
+       MLX5_IB_QP_MANAGED_SEND                 = IB_QP_CREATE_MANAGED_SEND,
+       MLX5_IB_QP_MANAGED_RECV                 = IB_QP_CREATE_MANAGED_RECV,
+       MLX5_IB_QP_SIGNATURE_HANDLING           = 1 << 5,
+       /* QP uses 1 as its source QP number */
+       MLX5_IB_QP_SQPN_QP1                     = 1 << 6,
 };
 
 struct mlx5_umr_wr {
@@ -373,6 +399,14 @@ struct mlx5_ib_cq {
        struct ib_umem         *resize_umem;
        int                     cqe_size;
        u32                     create_flags;
+       struct list_head        wc_list;
+       enum ib_cq_notify_flags notify_flags;
+       struct work_struct      notify_work;
+};
+
+struct mlx5_ib_wc {
+       struct ib_wc wc;
+       struct list_head list;
 };
 
 struct mlx5_ib_srq {
@@ -413,7 +447,8 @@ struct mlx5_ib_mr {
        int                     ndescs;
        int                     max_descs;
        int                     desc_size;
-       struct mlx5_core_mr     mmr;
+       int                     access_mode;
+       struct mlx5_core_mkey   mmkey;
        struct ib_umem         *umem;
        struct mlx5_shared_mr_info      *smr_info;
        struct list_head        list;
@@ -425,19 +460,20 @@ struct mlx5_ib_mr {
        struct mlx5_core_sig_ctx    *sig;
        int                     live;
        void                    *descs_alloc;
+       int                     access_flags; /* Needed for rereg MR */
+};
+
+struct mlx5_ib_mw {
+       struct ib_mw            ibmw;
+       struct mlx5_core_mkey   mmkey;
 };
 
 struct mlx5_ib_umr_context {
+       struct ib_cqe           cqe;
        enum ib_wc_status       status;
        struct completion       done;
 };
 
-static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
-{
-       context->status = -1;
-       init_completion(&context->done);
-}
-
 struct umr_common {
        struct ib_pd    *pd;
        struct ib_cq    *cq;
@@ -487,6 +523,14 @@ struct mlx5_mr_cache {
        unsigned long           last_add;
 };
 
+struct mlx5_ib_gsi_qp;
+
+struct mlx5_ib_port_resources {
+       struct mlx5_ib_resources *devr;
+       struct mlx5_ib_gsi_qp *gsi;
+       struct work_struct pkey_change_work;
+};
+
 struct mlx5_ib_resources {
        struct ib_cq    *c0;
        struct ib_xrcd  *x0;
@@ -494,6 +538,9 @@ struct mlx5_ib_resources {
        struct ib_pd    *p0;
        struct ib_srq   *s0;
        struct ib_srq   *s1;
+       struct mlx5_ib_port_resources ports[2];
+       /* Protects changes to the port resources */
+       struct mutex    mutex;
 };
 
 struct mlx5_roce {
@@ -558,9 +605,9 @@ static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
        return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
 }
 
-static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mr *mmr)
+static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mkey *mmkey)
 {
-       return container_of(mmr, struct mlx5_ib_mr, mmr);
+       return container_of(mmkey, struct mlx5_ib_mr, mmkey);
 }
 
 static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
@@ -588,6 +635,11 @@ static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
        return container_of(ibmr, struct mlx5_ib_mr, ibmr);
 }
 
+static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw)
+{
+       return container_of(ibmw, struct mlx5_ib_mw, ibmw);
+}
+
 struct mlx5_ib_ah {
        struct ib_ah            ibah;
        struct mlx5_av          av;
@@ -648,8 +700,14 @@ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
 struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                  u64 virt_addr, int access_flags,
                                  struct ib_udata *udata);
+struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
+                              struct ib_udata *udata);
+int mlx5_ib_dealloc_mw(struct ib_mw *mw);
 int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index,
                       int npages, int zap);
+int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
+                         u64 length, u64 virt_addr, int access_flags,
+                         struct ib_pd *pd, struct ib_udata *udata);
 int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
 struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
                               enum ib_mr_type mr_type,
@@ -700,7 +758,6 @@ int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
 int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
 int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
 int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift);
-void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context);
 int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
                            struct ib_mr_status *mr_status);
 
@@ -719,6 +776,14 @@ void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp);
 void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp);
 void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
                              unsigned long end);
+int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
+                         u8 port, struct ifla_vf_info *info);
+int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
+                             u8 port, int state);
+int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
+                        u8 port, struct ifla_vf_stats *stats);
+int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
+                       u64 guid, int type);
 
 #else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
 static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
@@ -739,6 +804,23 @@ static inline void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp)  {}
 __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
                               int index);
 
+/* GSI QP helper functions */
+struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
+                                   struct ib_qp_init_attr *init_attr);
+int mlx5_ib_gsi_destroy_qp(struct ib_qp *qp);
+int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
+                         int attr_mask);
+int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
+                        int qp_attr_mask,
+                        struct ib_qp_init_attr *qp_init_attr);
+int mlx5_ib_gsi_post_send(struct ib_qp *qp, struct ib_send_wr *wr,
+                         struct ib_send_wr **bad_wr);
+int mlx5_ib_gsi_post_recv(struct ib_qp *qp, struct ib_recv_wr *wr,
+                         struct ib_recv_wr **bad_wr);
+void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);
+
+int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);
+
 static inline void init_query_mad(struct ib_smp *mad)
 {
        mad->base_version  = 1;
@@ -758,7 +840,7 @@ static inline u8 convert_access(int acc)
 
 static inline int is_qp1(enum ib_qp_type qp_type)
 {
-       return qp_type == IB_QPT_GSI;
+       return qp_type == MLX5_IB_QPT_HW_GSI;
 }
 
 #define MLX5_MAX_UMR_SHIFT 16
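With MLX5_IB_QPT_HW_GSI defined above, an IB_QPT_GSI request from a consumer is routed to the new software GSI wrapper while the reserved type is used internally for the hardware QP. A condensed sketch of that dispatch, mirroring the mlx5_ib_create_qp() change later in this diff (the non-GSI path is stubbed out and the sketch is not part of the patch):

/* Condensed dispatch sketch; mlx5_ib_gsi_create_qp() is declared in
 * mlx5_ib.h above, the regular (non-GSI) path is stubbed out here. */
static struct ib_qp *create_qp_dispatch_sketch(struct ib_pd *pd,
					       struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_GSI)
		/* Software GSI wrapper; it creates the actual
		 * MLX5_IB_QPT_HW_GSI hardware QP underneath. */
		return mlx5_ib_gsi_create_qp(pd, attr);

	return ERR_PTR(-ENOSYS);	/* stand-in for the normal create path */
}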
index 6000f7aeede94301cbb7c281bfaae26299770106..4d5bff151cdf09957942f35255d84df2286f572b 100644
@@ -40,6 +40,7 @@
 #include <rdma/ib_umem_odp.h>
 #include <rdma/ib_verbs.h>
 #include "mlx5_ib.h"
+#include "user.h"
 
 enum {
        MAX_PENDING_REG_MR = 8,
@@ -57,7 +58,7 @@ static int clean_mr(struct mlx5_ib_mr *mr);
 
 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
-       int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
+       int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
 
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        /* Wait until all page fault handlers using the mr complete. */
@@ -77,6 +78,40 @@ static int order2idx(struct mlx5_ib_dev *dev, int order)
                return order - cache->ent[0].order;
 }
 
+static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
+{
+       return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >=
+               length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
+}
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+static void update_odp_mr(struct mlx5_ib_mr *mr)
+{
+       if (mr->umem->odp_data) {
+               /*
+                * This barrier prevents the compiler from moving the
+                * setting of umem->odp_data->private to point to our
+                * MR before reg_umr has finished, to ensure that the MR
+                * initialization has finished before starting to
+                * handle invalidations.
+                */
+               smp_wmb();
+               mr->umem->odp_data->private = mr;
+               /*
+                * Make sure we will see the new
+                * umem->odp_data->private value in the invalidation
+                * routines, before we can get page faults on the
+                * MR. Page faults can happen once we put the MR in
+                * the tree, below this line. Without the barrier,
+                * there can be a fault handling and an invalidation
+                * before umem->odp_data->private == mr is visible to
+                * the invalidation handler.
+                */
+               smp_wmb();
+       }
+}
+#endif
+
 static void reg_mr_callback(int status, void *context)
 {
        struct mlx5_ib_mr *mr = context;
@@ -86,7 +121,7 @@ static void reg_mr_callback(int status, void *context)
        struct mlx5_cache_ent *ent = &cache->ent[c];
        u8 key;
        unsigned long flags;
-       struct mlx5_mr_table *table = &dev->mdev->priv.mr_table;
+       struct mlx5_mkey_table *table = &dev->mdev->priv.mkey_table;
        int err;
 
        spin_lock_irqsave(&ent->lock, flags);
@@ -113,7 +148,7 @@ static void reg_mr_callback(int status, void *context)
        spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
        key = dev->mdev->priv.mkey_key++;
        spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
-       mr->mmr.key = mlx5_idx_to_mkey(be32_to_cpu(mr->out.mkey) & 0xffffff) | key;
+       mr->mmkey.key = mlx5_idx_to_mkey(be32_to_cpu(mr->out.mkey) & 0xffffff) | key;
 
        cache->last_add = jiffies;
 
@@ -124,10 +159,10 @@ static void reg_mr_callback(int status, void *context)
        spin_unlock_irqrestore(&ent->lock, flags);
 
        write_lock_irqsave(&table->lock, flags);
-       err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmr.key),
-                               &mr->mmr);
+       err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmkey.key),
+                               &mr->mmkey);
        if (err)
-               pr_err("Error inserting to mr tree. 0x%x\n", -err);
+               pr_err("Error inserting to mkey tree. 0x%x\n", -err);
        write_unlock_irqrestore(&table->lock, flags);
 }
 
@@ -168,7 +203,7 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
                spin_lock_irq(&ent->lock);
                ent->pending++;
                spin_unlock_irq(&ent->lock);
-               err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in,
+               err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in,
                                            sizeof(*in), reg_mr_callback,
                                            mr, &mr->out);
                if (err) {
@@ -657,14 +692,14 @@ struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
        seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
        seg->start_addr = 0;
 
-       err = mlx5_core_create_mkey(mdev, &mr->mmr, in, sizeof(*in), NULL, NULL,
+       err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, sizeof(*in), NULL, NULL,
                                    NULL);
        if (err)
                goto err_in;
 
        kfree(in);
-       mr->ibmr.lkey = mr->mmr.key;
-       mr->ibmr.rkey = mr->mmr.key;
+       mr->ibmr.lkey = mr->mmkey.key;
+       mr->ibmr.rkey = mr->mmkey.key;
        mr->umem = NULL;
 
        return &mr->ibmr;
@@ -693,10 +728,40 @@ static int use_umr(int order)
        return order <= MLX5_MAX_UMR_SHIFT;
 }
 
-static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
-                            struct ib_sge *sg, u64 dma, int n, u32 key,
-                            int page_shift, u64 virt_addr, u64 len,
-                            int access_flags)
+static int dma_map_mr_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
+                         int npages, int page_shift, int *size,
+                         __be64 **mr_pas, dma_addr_t *dma)
+{
+       __be64 *pas;
+       struct device *ddev = dev->ib_dev.dma_device;
+
+       /*
+        * UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
+        * To avoid copying garbage after the pas array, we allocate
+        * a little more.
+        */
+       *size = ALIGN(sizeof(u64) * npages, MLX5_UMR_MTT_ALIGNMENT);
+       *mr_pas = kmalloc(*size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
+       if (!(*mr_pas))
+               return -ENOMEM;
+
+       pas = PTR_ALIGN(*mr_pas, MLX5_UMR_ALIGN);
+       mlx5_ib_populate_pas(dev, umem, page_shift, pas, MLX5_IB_MTT_PRESENT);
+       /* Clear padding after the actual pages. */
+       memset(pas + npages, 0, *size - npages * sizeof(u64));
+
+       *dma = dma_map_single(ddev, pas, *size, DMA_TO_DEVICE);
+       if (dma_mapping_error(ddev, *dma)) {
+               kfree(*mr_pas);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static void prep_umr_wqe_common(struct ib_pd *pd, struct ib_send_wr *wr,
+                               struct ib_sge *sg, u64 dma, int n, u32 key,
+                               int page_shift)
 {
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct mlx5_umr_wr *umrwr = umr_wr(wr);
@@ -706,7 +771,6 @@ static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
        sg->lkey = dev->umrc.pd->local_dma_lkey;
 
        wr->next = NULL;
-       wr->send_flags = 0;
        wr->sg_list = sg;
        if (n)
                wr->num_sge = 1;
@@ -718,6 +782,19 @@ static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
        umrwr->npages = n;
        umrwr->page_shift = page_shift;
        umrwr->mkey = key;
+}
+
+static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
+                            struct ib_sge *sg, u64 dma, int n, u32 key,
+                            int page_shift, u64 virt_addr, u64 len,
+                            int access_flags)
+{
+       struct mlx5_umr_wr *umrwr = umr_wr(wr);
+
+       prep_umr_wqe_common(pd, wr, sg, dma, n, key, page_shift);
+
+       wr->send_flags = 0;
+
        umrwr->target.virt_addr = virt_addr;
        umrwr->length = len;
        umrwr->access_flags = access_flags;
@@ -734,26 +811,45 @@ static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
        umrwr->mkey = key;
 }
 
-void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context)
+static struct ib_umem *mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
+                                  int access_flags, int *npages,
+                                  int *page_shift, int *ncont, int *order)
 {
-       struct mlx5_ib_umr_context *context;
-       struct ib_wc wc;
-       int err;
-
-       while (1) {
-               err = ib_poll_cq(cq, 1, &wc);
-               if (err < 0) {
-                       pr_warn("poll cq error %d\n", err);
-                       return;
-               }
-               if (err == 0)
-                       break;
+       struct mlx5_ib_dev *dev = to_mdev(pd->device);
+       struct ib_umem *umem = ib_umem_get(pd->uobject->context, start, length,
+                                          access_flags, 0);
+       if (IS_ERR(umem)) {
+               mlx5_ib_err(dev, "umem get failed (%ld)\n", PTR_ERR(umem));
+               return (void *)umem;
+       }
 
-               context = (struct mlx5_ib_umr_context *) (unsigned long) wc.wr_id;
-               context->status = wc.status;
-               complete(&context->done);
+       mlx5_ib_cont_pages(umem, start, npages, page_shift, ncont, order);
+       if (!*npages) {
+               mlx5_ib_warn(dev, "avoid zero region\n");
+               ib_umem_release(umem);
+               return ERR_PTR(-EINVAL);
        }
-       ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+
+       mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
+                   *npages, *ncont, *order, *page_shift);
+
+       return umem;
+}
+
+static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+       struct mlx5_ib_umr_context *context =
+               container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);
+
+       context->status = wc->status;
+       complete(&context->done);
+}
+
+static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
+{
+       context->cqe.done = mlx5_ib_umr_done;
+       context->status = -1;
+       init_completion(&context->done);
 }
 
 static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
@@ -764,13 +860,12 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
        struct device *ddev = dev->ib_dev.dma_device;
        struct umr_common *umrc = &dev->umrc;
        struct mlx5_ib_umr_context umr_context;
-       struct mlx5_umr_wr umrwr;
+       struct mlx5_umr_wr umrwr = {};
        struct ib_send_wr *bad;
        struct mlx5_ib_mr *mr;
        struct ib_sge sg;
        int size;
        __be64 *mr_pas;
-       __be64 *pas;
        dma_addr_t dma;
        int err = 0;
        int i;
@@ -790,33 +885,17 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
        if (!mr)
                return ERR_PTR(-EAGAIN);
 
-       /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
-        * To avoid copying garbage after the pas array, we allocate
-        * a little more. */
-       size = ALIGN(sizeof(u64) * npages, MLX5_UMR_MTT_ALIGNMENT);
-       mr_pas = kmalloc(size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
-       if (!mr_pas) {
-               err = -ENOMEM;
+       err = dma_map_mr_pas(dev, umem, npages, page_shift, &size, &mr_pas,
+                            &dma);
+       if (err)
                goto free_mr;
-       }
 
-       pas = PTR_ALIGN(mr_pas, MLX5_UMR_ALIGN);
-       mlx5_ib_populate_pas(dev, umem, page_shift, pas, MLX5_IB_MTT_PRESENT);
-       /* Clear padding after the actual pages. */
-       memset(pas + npages, 0, size - npages * sizeof(u64));
-
-       dma = dma_map_single(ddev, pas, size, DMA_TO_DEVICE);
-       if (dma_mapping_error(ddev, dma)) {
-               err = -ENOMEM;
-               goto free_pas;
-       }
+       mlx5_ib_init_umr_context(&umr_context);
 
-       memset(&umrwr, 0, sizeof(umrwr));
-       umrwr.wr.wr_id = (u64)(unsigned long)&umr_context;
-       prep_umr_reg_wqe(pd, &umrwr.wr, &sg, dma, npages, mr->mmr.key,
+       umrwr.wr.wr_cqe = &umr_context.cqe;
+       prep_umr_reg_wqe(pd, &umrwr.wr, &sg, dma, npages, mr->mmkey.key,
                         page_shift, virt_addr, len, access_flags);
 
-       mlx5_ib_init_umr_context(&umr_context);
        down(&umrc->sem);
        err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
        if (err) {
@@ -830,9 +909,9 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
                }
        }
 
-       mr->mmr.iova = virt_addr;
-       mr->mmr.size = len;
-       mr->mmr.pd = to_mpd(pd)->pdn;
+       mr->mmkey.iova = virt_addr;
+       mr->mmkey.size = len;
+       mr->mmkey.pd = to_mpd(pd)->pdn;
 
        mr->live = 1;
 
@@ -840,7 +919,6 @@ unmap_dma:
        up(&umrc->sem);
        dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);
 
-free_pas:
        kfree(mr_pas);
 
 free_mr:
@@ -929,8 +1007,10 @@ int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
 
                dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);
 
+               mlx5_ib_init_umr_context(&umr_context);
+
                memset(&wr, 0, sizeof(wr));
-               wr.wr.wr_id = (u64)(unsigned long)&umr_context;
+               wr.wr.wr_cqe = &umr_context.cqe;
 
                sg.addr = dma;
                sg.length = ALIGN(npages * sizeof(u64),
@@ -944,10 +1024,9 @@ int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
                wr.wr.opcode = MLX5_IB_WR_UMR;
                wr.npages = sg.length / sizeof(u64);
                wr.page_shift = PAGE_SHIFT;
-               wr.mkey = mr->mmr.key;
+               wr.mkey = mr->mmkey.key;
                wr.target.offset = start_page_index;
 
-               mlx5_ib_init_umr_context(&umr_context);
                down(&umrc->sem);
                err = ib_post_send(umrc->qp, &wr.wr, &bad);
                if (err) {
@@ -974,10 +1053,14 @@ free_pas:
 }
 #endif
 
-static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
-                                    u64 length, struct ib_umem *umem,
-                                    int npages, int page_shift,
-                                    int access_flags)
+/*
+ * If ibmr is NULL it will be allocated by reg_create.
+ * Else, the given ibmr will be used.
+ */
+static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
+                                    u64 virt_addr, u64 length,
+                                    struct ib_umem *umem, int npages,
+                                    int page_shift, int access_flags)
 {
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct mlx5_create_mkey_mbox_in *in;
@@ -986,7 +1069,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
        int err;
        bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));
 
-       mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+       mr = ibmr ? to_mmr(ibmr) : kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);
 
@@ -1013,7 +1096,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
        in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
        in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length,
                                                         1 << page_shift));
-       err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, inlen, NULL,
+       err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen, NULL,
                                    NULL, NULL);
        if (err) {
                mlx5_ib_warn(dev, "create mkey failed\n");
@@ -1024,7 +1107,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
        mr->live = 1;
        kvfree(in);
 
-       mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmr.key);
+       mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);
 
        return mr;
 
@@ -1032,11 +1115,23 @@ err_2:
        kvfree(in);
 
 err_1:
-       kfree(mr);
+       if (!ibmr)
+               kfree(mr);
 
        return ERR_PTR(err);
 }
 
+static void set_mr_fileds(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
+                         int npages, u64 length, int access_flags)
+{
+       mr->npages = npages;
+       atomic_add(npages, &dev->mdev->priv.reg_pages);
+       mr->ibmr.lkey = mr->mmkey.key;
+       mr->ibmr.rkey = mr->mmkey.key;
+       mr->ibmr.length = length;
+       mr->access_flags = access_flags;
+}
+
 struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                  u64 virt_addr, int access_flags,
                                  struct ib_udata *udata)
@@ -1052,22 +1147,11 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 
        mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
                    start, virt_addr, length, access_flags);
-       umem = ib_umem_get(pd->uobject->context, start, length, access_flags,
-                          0);
-       if (IS_ERR(umem)) {
-               mlx5_ib_dbg(dev, "umem get failed (%ld)\n", PTR_ERR(umem));
-               return (void *)umem;
-       }
+       umem = mr_umem_get(pd, start, length, access_flags, &npages,
+                          &page_shift, &ncont, &order);
 
-       mlx5_ib_cont_pages(umem, start, &npages, &page_shift, &ncont, &order);
-       if (!npages) {
-               mlx5_ib_warn(dev, "avoid zero region\n");
-               err = -EINVAL;
-               goto error;
-       }
-
-       mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
-                   npages, ncont, order, page_shift);
+       if (IS_ERR(umem))
+               return (void *)umem;
 
        if (use_umr(order)) {
                mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift,
@@ -1083,45 +1167,21 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
        }
 
        if (!mr)
-               mr = reg_create(pd, virt_addr, length, umem, ncont, page_shift,
-                               access_flags);
+               mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
+                               page_shift, access_flags);
 
        if (IS_ERR(mr)) {
                err = PTR_ERR(mr);
                goto error;
        }
 
-       mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmr.key);
+       mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);
 
        mr->umem = umem;
-       mr->npages = npages;
-       atomic_add(npages, &dev->mdev->priv.reg_pages);
-       mr->ibmr.lkey = mr->mmr.key;
-       mr->ibmr.rkey = mr->mmr.key;
+       set_mr_fileds(dev, mr, npages, length, access_flags);
 
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-       if (umem->odp_data) {
-               /*
-                * This barrier prevents the compiler from moving the
-                * setting of umem->odp_data->private to point to our
-                * MR, before reg_umr finished, to ensure that the MR
-                * initialization have finished before starting to
-                * handle invalidations.
-                */
-               smp_wmb();
-               mr->umem->odp_data->private = mr;
-               /*
-                * Make sure we will see the new
-                * umem->odp_data->private value in the invalidation
-                * routines, before we can get page faults on the
-                * MR. Page faults can happen once we put the MR in
-                * the tree, below this line. Without the barrier,
-                * there can be a fault handling and an invalidation
-                * before umem->odp_data->private == mr is visible to
-                * the invalidation handler.
-                */
-               smp_wmb();
-       }
+       update_odp_mr(mr);
 #endif
 
        return &mr->ibmr;
@@ -1135,15 +1195,15 @@ static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
        struct umr_common *umrc = &dev->umrc;
        struct mlx5_ib_umr_context umr_context;
-       struct mlx5_umr_wr umrwr;
+       struct mlx5_umr_wr umrwr = {};
        struct ib_send_wr *bad;
        int err;
 
-       memset(&umrwr.wr, 0, sizeof(umrwr));
-       umrwr.wr.wr_id = (u64)(unsigned long)&umr_context;
-       prep_umr_unreg_wqe(dev, &umrwr.wr, mr->mmr.key);
-
        mlx5_ib_init_umr_context(&umr_context);
+
+       umrwr.wr.wr_cqe = &umr_context.cqe;
+       prep_umr_unreg_wqe(dev, &umrwr.wr, mr->mmkey.key);
+
        down(&umrc->sem);
        err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
        if (err) {
@@ -1165,6 +1225,167 @@ error:
        return err;
 }
 
+static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr, u64 virt_addr,
+                    u64 length, int npages, int page_shift, int order,
+                    int access_flags, int flags)
+{
+       struct mlx5_ib_dev *dev = to_mdev(pd->device);
+       struct device *ddev = dev->ib_dev.dma_device;
+       struct mlx5_ib_umr_context umr_context;
+       struct ib_send_wr *bad;
+       struct mlx5_umr_wr umrwr = {};
+       struct ib_sge sg;
+       struct umr_common *umrc = &dev->umrc;
+       dma_addr_t dma = 0;
+       __be64 *mr_pas = NULL;
+       int size;
+       int err;
+
+       mlx5_ib_init_umr_context(&umr_context);
+
+       umrwr.wr.wr_cqe = &umr_context.cqe;
+       umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE;
+
+       if (flags & IB_MR_REREG_TRANS) {
+               err = dma_map_mr_pas(dev, mr->umem, npages, page_shift, &size,
+                                    &mr_pas, &dma);
+               if (err)
+                       return err;
+
+               umrwr.target.virt_addr = virt_addr;
+               umrwr.length = length;
+               umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
+       }
+
+       prep_umr_wqe_common(pd, &umrwr.wr, &sg, dma, npages, mr->mmkey.key,
+                           page_shift);
+
+       if (flags & IB_MR_REREG_PD) {
+               umrwr.pd = pd;
+               umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD;
+       }
+
+       if (flags & IB_MR_REREG_ACCESS) {
+               umrwr.access_flags = access_flags;
+               umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_ACCESS;
+       }
+
+       /* post send request to UMR QP */
+       down(&umrc->sem);
+       err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
+
+       if (err) {
+               mlx5_ib_warn(dev, "post send failed, err %d\n", err);
+       } else {
+               wait_for_completion(&umr_context.done);
+               if (umr_context.status != IB_WC_SUCCESS) {
+                       mlx5_ib_warn(dev, "reg umr failed (%u)\n",
+                                    umr_context.status);
+                       err = -EFAULT;
+               }
+       }
+
+       up(&umrc->sem);
+       if (flags & IB_MR_REREG_TRANS) {
+               dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);
+               kfree(mr_pas);
+       }
+       return err;
+}
+
+int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
+                         u64 length, u64 virt_addr, int new_access_flags,
+                         struct ib_pd *new_pd, struct ib_udata *udata)
+{
+       struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
+       struct mlx5_ib_mr *mr = to_mmr(ib_mr);
+       struct ib_pd *pd = (flags & IB_MR_REREG_PD) ? new_pd : ib_mr->pd;
+       int access_flags = flags & IB_MR_REREG_ACCESS ?
+                           new_access_flags :
+                           mr->access_flags;
+       u64 addr = (flags & IB_MR_REREG_TRANS) ? virt_addr : mr->umem->address;
+       u64 len = (flags & IB_MR_REREG_TRANS) ? length : mr->umem->length;
+       int page_shift = 0;
+       int npages = 0;
+       int ncont = 0;
+       int order = 0;
+       int err;
+
+       mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
+                   start, virt_addr, length, access_flags);
+
+       if (flags != IB_MR_REREG_PD) {
+               /*
+                * Replace umem. This needs to be done whether or not UMR is
+                * used.
+                */
+               flags |= IB_MR_REREG_TRANS;
+               ib_umem_release(mr->umem);
+               mr->umem = mr_umem_get(pd, addr, len, access_flags, &npages,
+                                      &page_shift, &ncont, &order);
+               if (IS_ERR(mr->umem)) {
+                       err = PTR_ERR(mr->umem);
+                       mr->umem = NULL;
+                       return err;
+               }
+       }
+
+       if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) {
+               /*
+                * UMR can't be used - MKey needs to be replaced.
+                */
+               if (mr->umred) {
+                       err = unreg_umr(dev, mr);
+                       if (err)
+                               mlx5_ib_warn(dev, "Failed to unregister MR\n");
+               } else {
+                       err = destroy_mkey(dev, mr);
+                       if (err)
+                               mlx5_ib_warn(dev, "Failed to destroy MKey\n");
+               }
+               if (err)
+                       return err;
+
+               mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont,
+                               page_shift, access_flags);
+
+               if (IS_ERR(mr))
+                       return PTR_ERR(mr);
+
+               mr->umred = 0;
+       } else {
+               /*
+                * Send a UMR WQE
+                */
+               err = rereg_umr(pd, mr, addr, len, npages, page_shift,
+                               order, access_flags, flags);
+               if (err) {
+                       mlx5_ib_warn(dev, "Failed to rereg UMR\n");
+                       return err;
+               }
+       }
+
+       if (flags & IB_MR_REREG_PD) {
+               ib_mr->pd = pd;
+               mr->mmkey.pd = to_mpd(pd)->pdn;
+       }
+
+       if (flags & IB_MR_REREG_ACCESS)
+               mr->access_flags = access_flags;
+
+       if (flags & IB_MR_REREG_TRANS) {
+               atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);
+               set_mr_fileds(dev, mr, npages, len, access_flags);
+               mr->mmkey.iova = addr;
+               mr->mmkey.size = len;
+       }
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+       update_odp_mr(mr);
+#endif
+
+       return 0;
+}
+
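The flow of mlx5_ib_rereg_user_mr() above reduces to one decision: if the requested translation still fits the MTT allocation sized for the original MKey (use_umr_mtt_update()), the MKey is patched in place with a single UMR WQE; otherwise it is torn down and recreated. A condensed sketch of that branch, where release_old_mkey() is a hypothetical stand-in for the unreg_umr()/destroy_mkey() pair and error handling is omitted:

/* Condensed from mlx5_ib_rereg_user_mr() above; release_old_mkey() is a
 * hypothetical stand-in, all error handling is omitted. */
if ((flags & IB_MR_REREG_TRANS) && !use_umr_mtt_update(mr, addr, len)) {
	/* The new translation no longer fits the MTTs sized for the old
	 * MKey: drop it and build a fresh one, reusing the same ib_mr. */
	release_old_mkey(dev, mr);
	mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont,
			page_shift, access_flags);
} else {
	/* Everything fits: patch PD/access/translation in place with a
	 * single UMR work request. */
	rereg_umr(pd, mr, addr, len, npages, page_shift, order,
		  access_flags, flags);
}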
 static int
 mlx5_alloc_priv_descs(struct ib_device *device,
                      struct mlx5_ib_mr *mr,
@@ -1236,7 +1457,7 @@ static int clean_mr(struct mlx5_ib_mr *mr)
                err = destroy_mkey(dev, mr);
                if (err) {
                        mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
-                                    mr->mmr.key, err);
+                                    mr->mmkey.key, err);
                        return err;
                }
        } else {
@@ -1300,8 +1521,8 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct mlx5_create_mkey_mbox_in *in;
        struct mlx5_ib_mr *mr;
-       int access_mode, err;
-       int ndescs = roundup(max_num_sg, 4);
+       int ndescs = ALIGN(max_num_sg, 4);
+       int err;
 
        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
@@ -1319,7 +1540,7 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
        in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
 
        if (mr_type == IB_MR_TYPE_MEM_REG) {
-               access_mode = MLX5_ACCESS_MODE_MTT;
+               mr->access_mode = MLX5_ACCESS_MODE_MTT;
                in->seg.log2_page_size = PAGE_SHIFT;
 
                err = mlx5_alloc_priv_descs(pd->device, mr,
@@ -1329,6 +1550,15 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
 
                mr->desc_size = sizeof(u64);
                mr->max_descs = ndescs;
+       } else if (mr_type == IB_MR_TYPE_SG_GAPS) {
+               mr->access_mode = MLX5_ACCESS_MODE_KLM;
+
+               err = mlx5_alloc_priv_descs(pd->device, mr,
+                                           ndescs, sizeof(struct mlx5_klm));
+               if (err)
+                       goto err_free_in;
+               mr->desc_size = sizeof(struct mlx5_klm);
+               mr->max_descs = ndescs;
        } else if (mr_type == IB_MR_TYPE_SIGNATURE) {
                u32 psv_index[2];
 
@@ -1347,7 +1577,7 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
                if (err)
                        goto err_free_sig;
 
-               access_mode = MLX5_ACCESS_MODE_KLM;
+               mr->access_mode = MLX5_ACCESS_MODE_KLM;
                mr->sig->psv_memory.psv_idx = psv_index[0];
                mr->sig->psv_wire.psv_idx = psv_index[1];
 
@@ -1361,14 +1591,14 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
                goto err_free_in;
        }
 
-       in->seg.flags = MLX5_PERM_UMR_EN | access_mode;
-       err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, sizeof(*in),
+       in->seg.flags = MLX5_PERM_UMR_EN | mr->access_mode;
+       err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, sizeof(*in),
                                    NULL, NULL, NULL);
        if (err)
                goto err_destroy_psv;
 
-       mr->ibmr.lkey = mr->mmr.key;
-       mr->ibmr.rkey = mr->mmr.key;
+       mr->ibmr.lkey = mr->mmkey.key;
+       mr->ibmr.rkey = mr->mmkey.key;
        mr->umem = NULL;
        kfree(in);
 
@@ -1395,6 +1625,88 @@ err_free:
        return ERR_PTR(err);
 }
 
+struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
+                              struct ib_udata *udata)
+{
+       struct mlx5_ib_dev *dev = to_mdev(pd->device);
+       struct mlx5_create_mkey_mbox_in *in = NULL;
+       struct mlx5_ib_mw *mw = NULL;
+       int ndescs;
+       int err;
+       struct mlx5_ib_alloc_mw req = {};
+       struct {
+               __u32   comp_mask;
+               __u32   response_length;
+       } resp = {};
+
+       err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
+       if (err)
+               return ERR_PTR(err);
+
+       if (req.comp_mask || req.reserved1 || req.reserved2)
+               return ERR_PTR(-EOPNOTSUPP);
+
+       if (udata->inlen > sizeof(req) &&
+           !ib_is_udata_cleared(udata, sizeof(req),
+                                udata->inlen - sizeof(req)))
+               return ERR_PTR(-EOPNOTSUPP);
+
+       ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);
+
+       mw = kzalloc(sizeof(*mw), GFP_KERNEL);
+       in = kzalloc(sizeof(*in), GFP_KERNEL);
+       if (!mw || !in) {
+               err = -ENOMEM;
+               goto free;
+       }
+
+       in->seg.status = MLX5_MKEY_STATUS_FREE;
+       in->seg.xlt_oct_size = cpu_to_be32(ndescs);
+       in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
+       in->seg.flags = MLX5_PERM_UMR_EN | MLX5_ACCESS_MODE_KLM |
+               MLX5_PERM_LOCAL_READ;
+       if (type == IB_MW_TYPE_2)
+               in->seg.flags_pd |= cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
+       in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
+
+       err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, sizeof(*in),
+                                   NULL, NULL, NULL);
+       if (err)
+               goto free;
+
+       mw->ibmw.rkey = mw->mmkey.key;
+
+       resp.response_length = min(offsetof(typeof(resp), response_length) +
+                                  sizeof(resp.response_length), udata->outlen);
+       if (resp.response_length) {
+               err = ib_copy_to_udata(udata, &resp, resp.response_length);
+               if (err) {
+                       mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
+                       goto free;
+               }
+       }
+
+       kfree(in);
+       return &mw->ibmw;
+
+free:
+       kfree(mw);
+       kfree(in);
+       return ERR_PTR(err);
+}
+
+int mlx5_ib_dealloc_mw(struct ib_mw *mw)
+{
+       struct mlx5_ib_mw *mmw = to_mmw(mw);
+       int err;
+
+       err =  mlx5_core_destroy_mkey((to_mdev(mw->device))->mdev,
+                                     &mmw->mmkey);
+       if (!err)
+               kfree(mmw);
+       return err;
+}
+
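mlx5_ib_alloc_mw()/mlx5_ib_dealloc_mw() above are exposed to user space through the ALLOC_MW/DEALLOC_MW uverbs commands enabled earlier in this diff. A minimal user-space sketch, assuming a libibverbs build that provides the ibv_alloc_mw()/ibv_dealloc_mw() entry points; it is illustrative only:

#include <infiniband/verbs.h>
#include <stdio.h>

/* Sketch: allocate and free a type-2 memory window from user space. */
static int mw_sketch(struct ibv_pd *pd)
{
	struct ibv_mw *mw = ibv_alloc_mw(pd, IBV_MW_TYPE_2);

	if (!mw) {
		perror("ibv_alloc_mw");
		return -1;
	}
	printf("allocated MW, rkey 0x%x\n", mw->rkey);
	/* Binding the window to an MR would be done with a post-send WR. */
	return ibv_dealloc_mw(mw);
}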
 int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
                            struct ib_mr_status *mr_status)
 {
@@ -1436,6 +1748,32 @@ done:
        return ret;
 }
 
+static int
+mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
+                  struct scatterlist *sgl,
+                  unsigned short sg_nents)
+{
+       struct scatterlist *sg = sgl;
+       struct mlx5_klm *klms = mr->descs;
+       u32 lkey = mr->ibmr.pd->local_dma_lkey;
+       int i;
+
+       mr->ibmr.iova = sg_dma_address(sg);
+       mr->ibmr.length = 0;
+       mr->ndescs = sg_nents;
+
+       for_each_sg(sgl, sg, sg_nents, i) {
+               if (unlikely(i > mr->max_descs))
+                       break;
+               klms[i].va = cpu_to_be64(sg_dma_address(sg));
+               klms[i].bcount = cpu_to_be32(sg_dma_len(sg));
+               klms[i].key = cpu_to_be32(lkey);
+               mr->ibmr.length += sg_dma_len(sg);
+       }
+
+       return i;
+}
+
 static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
 {
        struct mlx5_ib_mr *mr = to_mmr(ibmr);
@@ -1463,7 +1801,10 @@ int mlx5_ib_map_mr_sg(struct ib_mr *ibmr,
                                   mr->desc_size * mr->max_descs,
                                   DMA_TO_DEVICE);
 
-       n = ib_sg_to_pages(ibmr, sg, sg_nents, mlx5_set_page);
+       if (mr->access_mode == MLX5_ACCESS_MODE_KLM)
+               n = mlx5_ib_sg_to_klms(mr, sg, sg_nents);
+       else
+               n = ib_sg_to_pages(ibmr, sg, sg_nents, mlx5_set_page);
 
        ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
                                      mr->desc_size * mr->max_descs,
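The KLM path added to mlx5_ib_map_mr_sg() above is what backs the new IB_MR_TYPE_SG_GAPS registrations: each scatterlist entry becomes one KLM, so the SG list no longer has to be page-aligned or gap-free. A minimal consumer sketch, assuming the ib_alloc_mr()/ib_map_mr_sg() core calls of this era (the exact ib_map_mr_sg() signature may differ between kernel versions):

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>

/* Sketch: register an arbitrary (possibly gappy) SG list with one MR. */
static struct ib_mr *reg_sg_gaps_sketch(struct ib_pd *pd,
					struct scatterlist *sgl, int nents)
{
	struct ib_mr *mr = ib_alloc_mr(pd, IB_MR_TYPE_SG_GAPS, nents);
	int n;

	if (IS_ERR(mr))
		return mr;

	/* One KLM per SG entry; returns how many entries were mapped. */
	n = ib_map_mr_sg(mr, sgl, nents, PAGE_SIZE);
	if (n != nents) {
		ib_dereg_mr(mr);
		return ERR_PTR(n < 0 ? n : -EINVAL);
	}
	return mr;	/* still needs a fast-reg WR before use on the wire */
}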
index b8d76361a48dcfe29914ff13011b09d2893c851d..34e79e709c67ba2757f004fb4d9974e34d393541 100644
@@ -142,13 +142,13 @@ static struct mlx5_ib_mr *mlx5_ib_odp_find_mr_lkey(struct mlx5_ib_dev *dev,
                                                   u32 key)
 {
        u32 base_key = mlx5_base_mkey(key);
-       struct mlx5_core_mr *mmr = __mlx5_mr_lookup(dev->mdev, base_key);
-       struct mlx5_ib_mr *mr = container_of(mmr, struct mlx5_ib_mr, mmr);
+       struct mlx5_core_mkey *mmkey = __mlx5_mr_lookup(dev->mdev, base_key);
+       struct mlx5_ib_mr *mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
 
-       if (!mmr || mmr->key != key || !mr->live)
+       if (!mmkey || mmkey->key != key || !mr->live)
                return NULL;
 
-       return container_of(mmr, struct mlx5_ib_mr, mmr);
+       return container_of(mmkey, struct mlx5_ib_mr, mmkey);
 }
 
 static void mlx5_ib_page_fault_resume(struct mlx5_ib_qp *qp,
@@ -232,7 +232,7 @@ static int pagefault_single_data_segment(struct mlx5_ib_qp *qp,
        io_virt += pfault->mpfault.bytes_committed;
        bcnt -= pfault->mpfault.bytes_committed;
 
-       start_idx = (io_virt - (mr->mmr.iova & PAGE_MASK)) >> PAGE_SHIFT;
+       start_idx = (io_virt - (mr->mmkey.iova & PAGE_MASK)) >> PAGE_SHIFT;
 
        if (mr->umem->writable)
                access_mask |= ODP_WRITE_ALLOWED_BIT;
index 34cb8e87c7b8b673493285f807206c437ac42a84..8dee8bc1e0fe9aa7139cfbd7aa4d9415127e52ca 100644
@@ -58,6 +58,7 @@ enum {
 
 static const u32 mlx5_ib_opcode[] = {
        [IB_WR_SEND]                            = MLX5_OPCODE_SEND,
+       [IB_WR_LSO]                             = MLX5_OPCODE_LSO,
        [IB_WR_SEND_WITH_IMM]                   = MLX5_OPCODE_SEND_IMM,
        [IB_WR_RDMA_WRITE]                      = MLX5_OPCODE_RDMA_WRITE,
        [IB_WR_RDMA_WRITE_WITH_IMM]             = MLX5_OPCODE_RDMA_WRITE_IMM,
@@ -72,6 +73,9 @@ static const u32 mlx5_ib_opcode[] = {
        [MLX5_IB_WR_UMR]                        = MLX5_OPCODE_UMR,
 };
 
+struct mlx5_wqe_eth_pad {
+       u8 rsvd0[16];
+};
 
 static int is_qp0(enum ib_qp_type qp_type)
 {
@@ -260,11 +264,11 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
        return 0;
 }
 
-static int sq_overhead(enum ib_qp_type qp_type)
+static int sq_overhead(struct ib_qp_init_attr *attr)
 {
        int size = 0;
 
-       switch (qp_type) {
+       switch (attr->qp_type) {
        case IB_QPT_XRC_INI:
                size += sizeof(struct mlx5_wqe_xrc_seg);
                /* fall through */
@@ -287,8 +291,12 @@ static int sq_overhead(enum ib_qp_type qp_type)
                break;
 
        case IB_QPT_UD:
+               if (attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
+                       size += sizeof(struct mlx5_wqe_eth_pad) +
+                               sizeof(struct mlx5_wqe_eth_seg);
+               /* fall through */
        case IB_QPT_SMI:
-       case IB_QPT_GSI:
+       case MLX5_IB_QPT_HW_GSI:
                size += sizeof(struct mlx5_wqe_ctrl_seg) +
                        sizeof(struct mlx5_wqe_datagram_seg);
                break;
@@ -311,7 +319,7 @@ static int calc_send_wqe(struct ib_qp_init_attr *attr)
        int inl_size = 0;
        int size;
 
-       size = sq_overhead(attr->qp_type);
+       size = sq_overhead(attr);
        if (size < 0)
                return size;
 
@@ -348,8 +356,8 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
                return -EINVAL;
        }
 
-       qp->max_inline_data = wqe_size - sq_overhead(attr->qp_type) -
-               sizeof(struct mlx5_wqe_inline_seg);
+       qp->max_inline_data = wqe_size - sq_overhead(attr) -
+                             sizeof(struct mlx5_wqe_inline_seg);
        attr->cap.max_inline_data = qp->max_inline_data;
 
        if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN)
@@ -590,7 +598,7 @@ static int to_mlx5_st(enum ib_qp_type type)
        case IB_QPT_XRC_INI:
        case IB_QPT_XRC_TGT:            return MLX5_QP_ST_XRC;
        case IB_QPT_SMI:                return MLX5_QP_ST_QP0;
-       case IB_QPT_GSI:                return MLX5_QP_ST_QP1;
+       case MLX5_IB_QPT_HW_GSI:        return MLX5_QP_ST_QP1;
        case IB_QPT_RAW_IPV6:           return MLX5_QP_ST_RAW_IPV6;
        case IB_QPT_RAW_PACKET:
        case IB_QPT_RAW_ETHERTYPE:      return MLX5_QP_ST_RAW_ETHERTYPE;
@@ -783,7 +791,10 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
        int err;
 
        uuari = &dev->mdev->priv.uuari;
-       if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK))
+       if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN |
+                                       IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
+                                       IB_QP_CREATE_IPOIB_UD_LSO |
+                                       mlx5_ib_create_qp_sqpn_qp1()))
                return -EINVAL;
 
        if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
@@ -828,6 +839,11 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
        (*in)->ctx.params1 |= cpu_to_be32(1 << 11);
        (*in)->ctx.sq_crq_size |= cpu_to_be16(1 << 4);
 
+       if (init_attr->create_flags & mlx5_ib_create_qp_sqpn_qp1()) {
+               (*in)->ctx.deth_sqpn = cpu_to_be32(1);
+               qp->flags |= MLX5_IB_QP_SQPN_QP1;
+       }
+
        mlx5_fill_page_array(&qp->buf, (*in)->pas);
 
        err = mlx5_db_alloc(dev->mdev, &qp->db);
@@ -1228,6 +1244,14 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
                if (init_attr->create_flags & IB_QP_CREATE_MANAGED_RECV)
                        qp->flags |= MLX5_IB_QP_MANAGED_RECV;
        }
+
+       if (init_attr->qp_type == IB_QPT_UD &&
+           (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO))
+               if (!MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
+                       mlx5_ib_dbg(dev, "ipoib UD lso qp isn't supported\n");
+                       return -EOPNOTSUPP;
+               }
+
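The check above rejects IB_QP_CREATE_IPOIB_UD_LSO with -EOPNOTSUPP when the device lacks ipoib_basic_offloads, so a consumer requesting the offload needs a fallback. A minimal sketch of that pattern (whether the in-tree ipoib driver probes it exactly this way is not shown in this diff):

#include <linux/err.h>
#include <rdma/ib_verbs.h>

/* Sketch: request UD LSO, retry without it if the device refuses. */
static struct ib_qp *create_ud_qp_sketch(struct ib_pd *pd,
					 struct ib_qp_init_attr *attr)
{
	struct ib_qp *qp;

	attr->qp_type = IB_QPT_UD;
	attr->create_flags |= IB_QP_CREATE_IPOIB_UD_LSO;

	qp = ib_create_qp(pd, attr);
	if (IS_ERR(qp) && PTR_ERR(qp) == -EOPNOTSUPP) {
		/* No ipoib_basic_offloads support; fall back to plain UD. */
		attr->create_flags &= ~IB_QP_CREATE_IPOIB_UD_LSO;
		qp = ib_create_qp(pd, attr);
	}
	return qp;
}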
        if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
                qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
 
@@ -1271,6 +1295,11 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
                                            ucmd.sq_wqe_count, max_wqes);
                                return -EINVAL;
                        }
+                       if (init_attr->create_flags &
+                           mlx5_ib_create_qp_sqpn_qp1()) {
+                               mlx5_ib_dbg(dev, "user-space is not allowed to create UD QPs spoofing as QP1\n");
+                               return -EINVAL;
+                       }
                        err = create_user_qp(dev, pd, qp, udata, init_attr, &in,
                                             &resp, &inlen, base);
                        if (err)
@@ -1385,6 +1414,13 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
                /* 0xffffff means we ask to work with cqe version 0 */
                MLX5_SET(qpc, qpc, user_index, uidx);
        }
+       /* we use IB_QP_CREATE_IPOIB_UD_LSO to indicate an IPoIB QP */
+       if (init_attr->qp_type == IB_QPT_UD &&
+           (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)) {
+               qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+               MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, 1);
+               qp->flags |= MLX5_IB_QP_LSO;
+       }
 
        if (init_attr->qp_type == IB_QPT_RAW_PACKET) {
                qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr;
@@ -1494,7 +1530,7 @@ static void get_cqs(struct mlx5_ib_qp *qp,
                break;
 
        case IB_QPT_SMI:
-       case IB_QPT_GSI:
+       case MLX5_IB_QPT_HW_GSI:
        case IB_QPT_RC:
        case IB_QPT_UC:
        case IB_QPT_UD:
@@ -1657,7 +1693,7 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
        case IB_QPT_UC:
        case IB_QPT_UD:
        case IB_QPT_SMI:
-       case IB_QPT_GSI:
+       case MLX5_IB_QPT_HW_GSI:
        case MLX5_IB_QPT_REG_UMR:
                qp = kzalloc(sizeof(*qp), GFP_KERNEL);
                if (!qp)
@@ -1686,6 +1722,9 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
 
                break;
 
+       case IB_QPT_GSI:
+               return mlx5_ib_gsi_create_qp(pd, init_attr);
+
        case IB_QPT_RAW_IPV6:
        case IB_QPT_RAW_ETHERTYPE:
        case IB_QPT_MAX:
@@ -1704,6 +1743,9 @@ int mlx5_ib_destroy_qp(struct ib_qp *qp)
        struct mlx5_ib_dev *dev = to_mdev(qp->device);
        struct mlx5_ib_qp *mqp = to_mqp(qp);
 
+       if (unlikely(qp->qp_type == IB_QPT_GSI))
+               return mlx5_ib_gsi_destroy_qp(qp);
+
        destroy_qp_common(dev, mqp);
 
        kfree(mqp);
@@ -2161,8 +2203,10 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 
        context = &in->ctx;
        err = to_mlx5_st(ibqp->qp_type);
-       if (err < 0)
+       if (err < 0) {
+               mlx5_ib_dbg(dev, "unsupported qp type %d\n", ibqp->qp_type);
                goto out;
+       }
 
        context->flags = cpu_to_be32(err << 16);
 
@@ -2182,7 +2226,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
                }
        }
 
-       if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) {
+       if (is_sqp(ibqp->qp_type)) {
                context->mtu_msgmax = (IB_MTU_256 << 5) | 8;
        } else if (ibqp->qp_type == IB_QPT_UD ||
                   ibqp->qp_type == MLX5_IB_QPT_REG_UMR) {
@@ -2284,6 +2328,8 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
        if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
                context->sq_crq_size |= cpu_to_be16(1 << 4);
 
+       if (qp->flags & MLX5_IB_QP_SQPN_QP1)
+               context->deth_sqpn = cpu_to_be32(1);
 
        mlx5_cur = to_mlx5_state(cur_state);
        mlx5_new = to_mlx5_state(new_state);
@@ -2363,11 +2409,18 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 {
        struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
        struct mlx5_ib_qp *qp = to_mqp(ibqp);
+       enum ib_qp_type qp_type;
        enum ib_qp_state cur_state, new_state;
        int err = -EINVAL;
        int port;
        enum rdma_link_layer ll = IB_LINK_LAYER_UNSPECIFIED;
 
+       if (unlikely(ibqp->qp_type == IB_QPT_GSI))
+               return mlx5_ib_gsi_modify_qp(ibqp, attr, attr_mask);
+
+       qp_type = (unlikely(ibqp->qp_type == MLX5_IB_QPT_HW_GSI)) ?
+               IB_QPT_GSI : ibqp->qp_type;
+
        mutex_lock(&qp->mutex);
 
        cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
@@ -2378,32 +2431,46 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                ll = dev->ib_dev.get_link_layer(&dev->ib_dev, port);
        }
 
-       if (ibqp->qp_type != MLX5_IB_QPT_REG_UMR &&
-           !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
-                               ll))
+       if (qp_type != MLX5_IB_QPT_REG_UMR &&
+           !ib_modify_qp_is_ok(cur_state, new_state, qp_type, attr_mask, ll)) {
+               mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
+                           cur_state, new_state, ibqp->qp_type, attr_mask);
                goto out;
+       }
 
        if ((attr_mask & IB_QP_PORT) &&
            (attr->port_num == 0 ||
-            attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports)))
+            attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports))) {
+               mlx5_ib_dbg(dev, "invalid port number %d. number of ports is %d\n",
+                           attr->port_num, dev->num_ports);
                goto out;
+       }
 
        if (attr_mask & IB_QP_PKEY_INDEX) {
                port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
                if (attr->pkey_index >=
-                   dev->mdev->port_caps[port - 1].pkey_table_len)
+                   dev->mdev->port_caps[port - 1].pkey_table_len) {
+                       mlx5_ib_dbg(dev, "invalid pkey index %d\n",
+                                   attr->pkey_index);
                        goto out;
+               }
        }
 
        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
            attr->max_rd_atomic >
-           (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp)))
+           (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp))) {
+               mlx5_ib_dbg(dev, "invalid max_rd_atomic value %d\n",
+                           attr->max_rd_atomic);
                goto out;
+       }
 
        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
            attr->max_dest_rd_atomic >
-           (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp)))
+           (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp))) {
+               mlx5_ib_dbg(dev, "invalid max_dest_rd_atomic value %d\n",
+                           attr->max_dest_rd_atomic);
                goto out;
+       }
 
        if (cur_state == new_state && cur_state == IB_QPS_RESET) {
                err = 0;
@@ -2442,6 +2509,59 @@ static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
        rseg->reserved = 0;
 }
 
+static void *set_eth_seg(struct mlx5_wqe_eth_seg *eseg,
+                        struct ib_send_wr *wr, void *qend,
+                        struct mlx5_ib_qp *qp, int *size)
+{
+       void *seg = eseg;
+
+       memset(eseg, 0, sizeof(struct mlx5_wqe_eth_seg));
+
+       if (wr->send_flags & IB_SEND_IP_CSUM)
+               eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM |
+                                MLX5_ETH_WQE_L4_CSUM;
+
+       seg += sizeof(struct mlx5_wqe_eth_seg);
+       *size += sizeof(struct mlx5_wqe_eth_seg) / 16;
+
+       if (wr->opcode == IB_WR_LSO) {
+               struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr);
+               int size_of_inl_hdr_start = sizeof(eseg->inline_hdr_start);
+               u64 left, leftlen, copysz;
+               void *pdata = ud_wr->header;
+
+               left = ud_wr->hlen;
+               eseg->mss = cpu_to_be16(ud_wr->mss);
+               eseg->inline_hdr_sz = cpu_to_be16(left);
+
+               /*
+                * Check if there is room up to the end of the queue; if so,
+                * copy everything in one shot. Otherwise copy up to the end
+                * of the queue, wrap around, and then copy the remainder.
+                */
+               leftlen = qend - (void *)eseg->inline_hdr_start;
+               copysz = min_t(u64, leftlen, left);
+
+               memcpy(seg - size_of_inl_hdr_start, pdata, copysz);
+
+               if (likely(copysz > size_of_inl_hdr_start)) {
+                       seg += ALIGN(copysz - size_of_inl_hdr_start, 16);
+                       *size += ALIGN(copysz - size_of_inl_hdr_start, 16) / 16;
+               }
+
+               if (unlikely(copysz < left)) { /* the last wqe in the queue */
+                       seg = mlx5_get_send_wqe(qp, 0);
+                       left -= copysz;
+                       pdata += copysz;
+                       memcpy(seg, pdata, left);
+                       seg += ALIGN(left, 16);
+                       *size += ALIGN(left, 16) / 16;
+               }
+       }
+
+       return seg;
+}
+
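The inline-header copy in set_eth_seg() above has to handle headers that straddle the end of the send queue: copy up to the queue end, wrap to the first WQE, then copy the remainder. The same pattern in isolation, as a plain ring-buffer sketch with assumed buffer and length names:

#include <string.h>
#include <stdint.h>
#include <stddef.h>

/* Sketch: copy len bytes into ring[cap] starting at offset off,
 * wrapping to the start of the ring when the end is reached. */
static void ring_copy(uint8_t *ring, size_t cap, size_t off,
		      const uint8_t *src, size_t len)
{
	size_t first = (cap - off < len) ? cap - off : len;

	memcpy(ring + off, src, first);			/* up to the ring end */
	if (len > first)
		memcpy(ring, src + first, len - first);	/* wrapped remainder */
}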
 static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
                             struct ib_send_wr *wr)
 {
@@ -2509,6 +2629,11 @@ static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
        int ndescs = mr->ndescs;
 
        memset(umr, 0, sizeof(*umr));
+
+       if (mr->access_mode == MLX5_ACCESS_MODE_KLM)
+               /* KLMs take twice the size of MTTs */
+               ndescs *= 2;
+
        umr->flags = MLX5_UMR_CHECK_NOT_FREE;
        umr->klm_octowords = get_klm_octo(ndescs);
        umr->mkey_mask = frwr_mkey_mask();
@@ -2558,6 +2683,44 @@ static __be64 get_umr_update_mtt_mask(void)
        return cpu_to_be64(result);
 }
 
+static __be64 get_umr_update_translation_mask(void)
+{
+       u64 result;
+
+       result = MLX5_MKEY_MASK_LEN |
+                MLX5_MKEY_MASK_PAGE_SIZE |
+                MLX5_MKEY_MASK_START_ADDR |
+                MLX5_MKEY_MASK_KEY |
+                MLX5_MKEY_MASK_FREE;
+
+       return cpu_to_be64(result);
+}
+
+static __be64 get_umr_update_access_mask(void)
+{
+       u64 result;
+
+       result = MLX5_MKEY_MASK_LW |
+                MLX5_MKEY_MASK_RR |
+                MLX5_MKEY_MASK_RW |
+                MLX5_MKEY_MASK_A |
+                MLX5_MKEY_MASK_KEY |
+                MLX5_MKEY_MASK_FREE;
+
+       return cpu_to_be64(result);
+}
+
+static __be64 get_umr_update_pd_mask(void)
+{
+       u64 result;
+
+       result = MLX5_MKEY_MASK_PD |
+                MLX5_MKEY_MASK_KEY |
+                MLX5_MKEY_MASK_FREE;
+
+       return cpu_to_be64(result);
+}
+
 static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
                                struct ib_send_wr *wr)
 {
@@ -2576,9 +2739,15 @@ static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
                        umr->mkey_mask = get_umr_update_mtt_mask();
                        umr->bsf_octowords = get_klm_octo(umrwr->target.offset);
                        umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
-               } else {
-                       umr->mkey_mask = get_umr_reg_mr_mask();
                }
+               if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION)
+                       umr->mkey_mask |= get_umr_update_translation_mask();
+               if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_ACCESS)
+                       umr->mkey_mask |= get_umr_update_access_mask();
+               if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD)
+                       umr->mkey_mask |= get_umr_update_pd_mask();
+               if (!umr->mkey_mask)
+                       umr->mkey_mask = get_umr_reg_mr_mask();
        } else {
                umr->mkey_mask = get_umr_unreg_mr_mask();
        }
@@ -2603,13 +2772,19 @@ static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg,
        int ndescs = ALIGN(mr->ndescs, 8) >> 1;
 
        memset(seg, 0, sizeof(*seg));
-       seg->flags = get_umr_flags(access) | MLX5_ACCESS_MODE_MTT;
+
+       if (mr->access_mode == MLX5_ACCESS_MODE_MTT)
+               seg->log2_page_size = ilog2(mr->ibmr.page_size);
+       else if (mr->access_mode == MLX5_ACCESS_MODE_KLM)
+               /* KLMs take twice the size of MTTs */
+               ndescs *= 2;
+
+       seg->flags = get_umr_flags(access) | mr->access_mode;
        seg->qpn_mkey7_0 = cpu_to_be32((key & 0xff) | 0xffffff00);
        seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
        seg->start_addr = cpu_to_be64(mr->ibmr.iova);
        seg->len = cpu_to_be64(mr->ibmr.length);
        seg->xlt_oct_size = cpu_to_be32(ndescs);
-       seg->log2_page_size = ilog2(mr->ibmr.page_size);
 }
 
 static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg)
@@ -2630,7 +2805,8 @@ static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *w
 
        seg->flags = convert_access(umrwr->access_flags);
        if (!(wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT)) {
-               seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
+               if (umrwr->pd)
+                       seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
                seg->start_addr = cpu_to_be64(umrwr->target.virt_addr);
        }
        seg->len = cpu_to_be64(umrwr->length);
@@ -3196,13 +3372,13 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 {
        struct mlx5_wqe_ctrl_seg *ctrl = NULL;  /* compiler warning */
        struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
-       struct mlx5_ib_qp *qp = to_mqp(ibqp);
+       struct mlx5_ib_qp *qp;
        struct mlx5_ib_mr *mr;
        struct mlx5_wqe_data_seg *dpseg;
        struct mlx5_wqe_xrc_seg *xrc;
-       struct mlx5_bf *bf = qp->bf;
+       struct mlx5_bf *bf;
        int uninitialized_var(size);
-       void *qend = qp->sq.qend;
+       void *qend;
        unsigned long flags;
        unsigned idx;
        int err = 0;
@@ -3214,6 +3390,13 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
        u8 next_fence = 0;
        u8 fence;
 
+       if (unlikely(ibqp->qp_type == IB_QPT_GSI))
+               return mlx5_ib_gsi_post_send(ibqp, wr, bad_wr);
+
+       qp = to_mqp(ibqp);
+       bf = qp->bf;
+       qend = qp->sq.qend;
+
        spin_lock_irqsave(&qp->sq.lock, flags);
 
        for (nreq = 0; wr; nreq++, wr = wr->next) {
@@ -3373,16 +3556,37 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                        }
                        break;
 
-               case IB_QPT_UD:
                case IB_QPT_SMI:
-               case IB_QPT_GSI:
+               case MLX5_IB_QPT_HW_GSI:
                        set_datagram_seg(seg, wr);
                        seg += sizeof(struct mlx5_wqe_datagram_seg);
                        size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
                        if (unlikely((seg == qend)))
                                seg = mlx5_get_send_wqe(qp, 0);
                        break;
+               case IB_QPT_UD:
+                       set_datagram_seg(seg, wr);
+                       seg += sizeof(struct mlx5_wqe_datagram_seg);
+                       size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
+
+                       if (unlikely((seg == qend)))
+                               seg = mlx5_get_send_wqe(qp, 0);
+
+                       /* handle qp that supports ud offload */
+                       if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO) {
+                               struct mlx5_wqe_eth_pad *pad;
+
+                               pad = seg;
+                               memset(pad, 0, sizeof(struct mlx5_wqe_eth_pad));
+                               seg += sizeof(struct mlx5_wqe_eth_pad);
+                               size += sizeof(struct mlx5_wqe_eth_pad) / 16;
+
+                               seg = set_eth_seg(seg, wr, qend, qp, &size);
+
+                               if (unlikely((seg == qend)))
+                                       seg = mlx5_get_send_wqe(qp, 0);
+                       }
+                       break;
                case MLX5_IB_QPT_REG_UMR:
                        if (wr->opcode != MLX5_IB_WR_UMR) {
                                err = -EINVAL;
@@ -3502,6 +3706,9 @@ int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
        int ind;
        int i;
 
+       if (unlikely(ibqp->qp_type == IB_QPT_GSI))
+               return mlx5_ib_gsi_post_recv(ibqp, wr, bad_wr);
+
        spin_lock_irqsave(&qp->rq.lock, flags);
 
        ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
@@ -3822,6 +4029,10 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
        int err = 0;
        u8 raw_packet_qp_state;
 
+       if (unlikely(ibqp->qp_type == IB_QPT_GSI))
+               return mlx5_ib_gsi_query_qp(ibqp, qp_attr, qp_attr_mask,
+                                           qp_init_attr);
+
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        /*
         * Wait for any outstanding page faults, in case the user frees memory
@@ -3874,6 +4085,8 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
                qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_SEND;
        if (qp->flags & MLX5_IB_QP_MANAGED_RECV)
                qp_init_attr->create_flags |= IB_QP_CREATE_MANAGED_RECV;
+       if (qp->flags & MLX5_IB_QP_SQPN_QP1)
+               qp_init_attr->create_flags |= mlx5_ib_create_qp_sqpn_qp1();
 
        qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
                IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
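
The set_eth_seg() hunk above writes the LSO inline header into the send
queue and wraps to the start of the queue when the header crosses the end
of the ring. A minimal stand-alone sketch of that wraparound copy is shown
below; the names (dst, sq_start, sq_end) are illustrative and the 16-byte
rounding of the consumed space is omitted, so this is not code from the
patch.

    static void *copy_inline_hdr(void *dst, const void *hdr, size_t len,
                                 void *sq_start, void *sq_end)
    {
            /* Bytes of header that fit before the end of the queue buffer. */
            size_t first = (size_t)((char *)sq_end - (char *)dst);

            if (first > len)
                    first = len;
            memcpy(dst, hdr, first);
            if (first < len) {
                    /* Wrap to the start of the queue and copy the rest. */
                    memcpy(sq_start, (const char *)hdr + first, len - first);
                    return (char *)sq_start + (len - first);
            }
            return (char *)dst + first;
    }
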
index 4659256cd95e698c3a9dad3c69407276e2117f29..3b2ddd64a371689e1533cb08c23007a4d6016b03 100644 (file)
@@ -75,7 +75,8 @@ static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, enum mlx5_event type)
 
 static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
                           struct mlx5_create_srq_mbox_in **in,
-                          struct ib_udata *udata, int buf_size, int *inlen)
+                          struct ib_udata *udata, int buf_size, int *inlen,
+                          int is_xrc)
 {
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct mlx5_ib_create_srq ucmd = {};
@@ -87,13 +88,8 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
        int ncont;
        u32 offset;
        u32 uidx = MLX5_IB_DEFAULT_UIDX;
-       int drv_data = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
 
-       if (drv_data < 0)
-               return -EINVAL;
-
-       ucmdlen = (drv_data < sizeof(ucmd)) ?
-                 drv_data : sizeof(ucmd);
+       ucmdlen = min(udata->inlen, sizeof(ucmd));
 
        if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) {
                mlx5_ib_dbg(dev, "failed copy udata\n");
@@ -103,15 +99,17 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
        if (ucmd.reserved0 || ucmd.reserved1)
                return -EINVAL;
 
-       if (drv_data > sizeof(ucmd) &&
+       if (udata->inlen > sizeof(ucmd) &&
            !ib_is_udata_cleared(udata, sizeof(ucmd),
-                                drv_data - sizeof(ucmd)))
+                                udata->inlen - sizeof(ucmd)))
                return -EINVAL;
 
-       err = get_srq_user_index(to_mucontext(pd->uobject->context),
-                                &ucmd, udata->inlen, &uidx);
-       if (err)
-               return err;
+       if (is_xrc) {
+               err = get_srq_user_index(to_mucontext(pd->uobject->context),
+                                        &ucmd, udata->inlen, &uidx);
+               if (err)
+                       return err;
+       }
 
        srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);
 
@@ -151,7 +149,8 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
        (*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
        (*in)->ctx.pgoff_cqn = cpu_to_be32(offset << 26);
 
-       if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) {
+       if ((MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) &&
+            is_xrc) {
                xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in,
                                     xrc_srq_context_entry);
                MLX5_SET(xrc_srqc, xsrqc, user_index, uidx);
@@ -170,7 +169,7 @@ err_umem:
 
 static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
                             struct mlx5_create_srq_mbox_in **in, int buf_size,
-                            int *inlen)
+                            int *inlen, int is_xrc)
 {
        int err;
        int i;
@@ -224,7 +223,8 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
 
        (*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
 
-       if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) {
+       if ((MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) &&
+            is_xrc) {
                xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in,
                                     xrc_srq_context_entry);
                /* 0xffffff means we ask to work with cqe version 0 */
@@ -302,10 +302,14 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
                    desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs,
                    srq->msrq.max_avail_gather);
 
+       is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
+
        if (pd->uobject)
-               err = create_srq_user(pd, srq, &in, udata, buf_size, &inlen);
+               err = create_srq_user(pd, srq, &in, udata, buf_size, &inlen,
+                                     is_xrc);
        else
-               err = create_srq_kernel(dev, srq, &in, buf_size, &inlen);
+               err = create_srq_kernel(dev, srq, &in, buf_size, &inlen,
+                                       is_xrc);
 
        if (err) {
                mlx5_ib_warn(dev, "create srq %s failed, err %d\n",
@@ -313,7 +317,6 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
                goto err_srq;
        }
 
-       is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
        in->ctx.state_log_sz = ilog2(srq->msrq.max);
        flgs = ((srq->msrq.wqe_shift - 4) | (is_xrc << 5) | (srq->wq_sig << 7)) << 24;
        xrcdn = 0;
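
The user-command handling adopted above follows the common uverbs pattern:
clamp the copy length to the size the kernel knows about and reject any
non-zero trailing input, so older kernels keep working with newer, larger
userspace structures. A condensed restatement of that pattern (error paths
simplified, names as in the hunk):

    ucmdlen = min(udata->inlen, sizeof(ucmd));
    if (ib_copy_from_udata(&ucmd, udata, ucmdlen))
            return -EFAULT;
    if (udata->inlen > sizeof(ucmd) &&
        !ib_is_udata_cleared(udata, sizeof(ucmd),
                             udata->inlen - sizeof(ucmd)))
            return -EINVAL;
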
index b94a55404a596dab323a843dec9a817fddbaff74..61bc308bb802ce93069601e85507849a8f61e98d 100644 (file)
@@ -152,6 +152,13 @@ struct mlx5_ib_create_qp_resp {
        __u32   uuar_index;
 };
 
+struct mlx5_ib_alloc_mw {
+       __u32   comp_mask;
+       __u8    num_klms;
+       __u8    reserved1;
+       __u16   reserved2;
+};
+
 static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
                                    struct mlx5_ib_create_qp *ucmd,
                                    int inlen,
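
struct mlx5_ib_alloc_mw above follows the usual driver-ABI convention of
carrying a comp_mask plus explicit reserved padding so the structure can be
extended later. A hypothetical kernel-side check illustrating why those
fields must be zero today (not taken from this patch):

    struct mlx5_ib_alloc_mw req = {};

    if (ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req))))
            return -EFAULT;
    /* Unknown capability bits and reserved fields must be zero. */
    if (req.comp_mask || req.reserved1 || req.reserved2)
            return -EOPNOTSUPP;
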
index 846dc97cf260658c5edb3edd95b9ccfd0b089edb..7964eba8e7ede7b8746027ab60595bf6f86e7279 100644 (file)
@@ -2,7 +2,6 @@ config INFINIBAND_NES
        tristate "NetEffect RNIC Driver"
        depends on PCI && INET && INFINIBAND
        select LIBCRC32C
-       select INET_LRO
        ---help---
          This is the RDMA Network Interface Card (RNIC) driver for
          NetEffect Ethernet Cluster Server Adapters.
index 9f9d5c563a614c0c273368966d380c82d9b2a1a4..35cbb17bec12d93771917bfd92aa537a031443a7 100644 (file)
@@ -111,17 +111,6 @@ static struct pci_device_id nes_pci_table[] = {
 
 MODULE_DEVICE_TABLE(pci, nes_pci_table);
 
-/* registered nes netlink callbacks */
-static struct ibnl_client_cbs nes_nl_cb_table[] = {
-       [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
-       [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
-       [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
-       [RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb},
-       [RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
-       [RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
-       [RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb}
-};
-
 static int nes_inetaddr_event(struct notifier_block *, unsigned long, void *);
 static int nes_net_event(struct notifier_block *, unsigned long, void *);
 static int nes_notifiers_registered;
@@ -682,17 +671,6 @@ static int nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
        }
        nes_notifiers_registered++;
 
-       if (ibnl_add_client(RDMA_NL_NES, RDMA_NL_IWPM_NUM_OPS, nes_nl_cb_table))
-               printk(KERN_ERR PFX "%s[%u]: Failed to add netlink callback\n",
-                       __func__, __LINE__);
-
-       ret = iwpm_init(RDMA_NL_NES);
-       if (ret) {
-               printk(KERN_ERR PFX "%s: port mapper initialization failed\n",
-                               pci_name(pcidev));
-               goto bail7;
-       }
-
        INIT_DELAYED_WORK(&nesdev->work, nes_recheck_link_status);
 
        /* Initialize network devices */
@@ -731,7 +709,6 @@ static int nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
 
        nes_debug(NES_DBG_INIT, "netdev_count=%d, nesadapter->netdev_count=%d\n",
                        nesdev->netdev_count, nesdev->nesadapter->netdev_count);
-       ibnl_remove_client(RDMA_NL_NES);
 
        nes_notifiers_registered--;
        if (nes_notifiers_registered == 0) {
@@ -795,8 +772,6 @@ static void nes_remove(struct pci_dev *pcidev)
                                nesdev->nesadapter->netdev_count--;
                        }
                }
-       ibnl_remove_client(RDMA_NL_NES);
-       iwpm_exit(RDMA_NL_NES);
 
        nes_notifiers_registered--;
        if (nes_notifiers_registered == 0) {
index cb9f0f27308de0dd439844cc06742a74caa52899..7f0aa23aef9dada8e26fa0e4f4c69111f9ce6bef 100644 (file)
@@ -482,11 +482,11 @@ static void form_cm_frame(struct sk_buff *skb,
        iph->ttl = 0x40;
        iph->protocol = 0x06;   /* IPPROTO_TCP */
 
-       iph->saddr = htonl(cm_node->mapped_loc_addr);
-       iph->daddr = htonl(cm_node->mapped_rem_addr);
+       iph->saddr = htonl(cm_node->loc_addr);
+       iph->daddr = htonl(cm_node->rem_addr);
 
-       tcph->source = htons(cm_node->mapped_loc_port);
-       tcph->dest = htons(cm_node->mapped_rem_port);
+       tcph->source = htons(cm_node->loc_port);
+       tcph->dest = htons(cm_node->rem_port);
        tcph->seq = htonl(cm_node->tcp_cntxt.loc_seq_num);
 
        if (flags & SET_ACK) {
@@ -525,125 +525,6 @@ static void form_cm_frame(struct sk_buff *skb,
        cm_packets_created++;
 }
 
-/*
- * nes_create_sockaddr - Record ip addr and tcp port in a sockaddr struct
- */
-static void nes_create_sockaddr(__be32 ip_addr, __be16 port,
-                               struct sockaddr_storage *addr)
-{
-       struct sockaddr_in *nes_sockaddr = (struct sockaddr_in *)addr;
-       nes_sockaddr->sin_family = AF_INET;
-       memcpy(&nes_sockaddr->sin_addr.s_addr, &ip_addr, sizeof(__be32));
-       nes_sockaddr->sin_port = port;
-}
-
-/*
- * nes_create_mapinfo - Create a mapinfo object in the port mapper data base
- */
-static int nes_create_mapinfo(struct nes_cm_info *cm_info)
-{
-       struct sockaddr_storage local_sockaddr;
-       struct sockaddr_storage mapped_sockaddr;
-
-       nes_create_sockaddr(htonl(cm_info->loc_addr), htons(cm_info->loc_port),
-                               &local_sockaddr);
-       nes_create_sockaddr(htonl(cm_info->mapped_loc_addr),
-                       htons(cm_info->mapped_loc_port), &mapped_sockaddr);
-
-       return iwpm_create_mapinfo(&local_sockaddr,
-                               &mapped_sockaddr, RDMA_NL_NES);
-}
-
-/*
- * nes_remove_mapinfo - Remove a mapinfo object from the port mapper data base
- *                      and send a remove mapping op message to
- *                      the userspace port mapper
- */
-static int nes_remove_mapinfo(u32 loc_addr, u16 loc_port,
-                       u32 mapped_loc_addr, u16 mapped_loc_port)
-{
-       struct sockaddr_storage local_sockaddr;
-       struct sockaddr_storage mapped_sockaddr;
-
-       nes_create_sockaddr(htonl(loc_addr), htons(loc_port), &local_sockaddr);
-       nes_create_sockaddr(htonl(mapped_loc_addr), htons(mapped_loc_port),
-                               &mapped_sockaddr);
-
-       iwpm_remove_mapinfo(&local_sockaddr, &mapped_sockaddr);
-       return iwpm_remove_mapping(&local_sockaddr, RDMA_NL_NES);
-}
-
-/*
- * nes_form_pm_msg - Form a port mapper message with mapping info
- */
-static void nes_form_pm_msg(struct nes_cm_info *cm_info,
-                               struct iwpm_sa_data *pm_msg)
-{
-       nes_create_sockaddr(htonl(cm_info->loc_addr), htons(cm_info->loc_port),
-                               &pm_msg->loc_addr);
-       nes_create_sockaddr(htonl(cm_info->rem_addr), htons(cm_info->rem_port),
-                               &pm_msg->rem_addr);
-}
-
-/*
- * nes_form_reg_msg - Form a port mapper message with dev info
- */
-static void nes_form_reg_msg(struct nes_vnic *nesvnic,
-                       struct iwpm_dev_data *pm_msg)
-{
-       memcpy(pm_msg->dev_name, nesvnic->nesibdev->ibdev.name,
-                               IWPM_DEVNAME_SIZE);
-       memcpy(pm_msg->if_name, nesvnic->netdev->name, IWPM_IFNAME_SIZE);
-}
-
-static void record_sockaddr_info(struct sockaddr_storage *addr_info,
-                                       nes_addr_t *ip_addr, u16 *port_num)
-{
-       struct sockaddr_in *in_addr = (struct sockaddr_in *)addr_info;
-
-       if (in_addr->sin_family == AF_INET) {
-               *ip_addr = ntohl(in_addr->sin_addr.s_addr);
-               *port_num = ntohs(in_addr->sin_port);
-       }
-}
-
-/*
- * nes_record_pm_msg - Save the received mapping info
- */
-static void nes_record_pm_msg(struct nes_cm_info *cm_info,
-                       struct iwpm_sa_data *pm_msg)
-{
-       record_sockaddr_info(&pm_msg->mapped_loc_addr,
-               &cm_info->mapped_loc_addr, &cm_info->mapped_loc_port);
-
-       record_sockaddr_info(&pm_msg->mapped_rem_addr,
-               &cm_info->mapped_rem_addr, &cm_info->mapped_rem_port);
-}
-
-/*
- * nes_get_reminfo - Get the address info of the remote connecting peer
- */
-static int nes_get_remote_addr(struct nes_cm_node *cm_node)
-{
-       struct sockaddr_storage mapped_loc_addr, mapped_rem_addr;
-       struct sockaddr_storage remote_addr;
-       int ret;
-
-       nes_create_sockaddr(htonl(cm_node->mapped_loc_addr),
-                       htons(cm_node->mapped_loc_port), &mapped_loc_addr);
-       nes_create_sockaddr(htonl(cm_node->mapped_rem_addr),
-                       htons(cm_node->mapped_rem_port), &mapped_rem_addr);
-
-       ret = iwpm_get_remote_info(&mapped_loc_addr, &mapped_rem_addr,
-                               &remote_addr, RDMA_NL_NES);
-       if (ret)
-               nes_debug(NES_DBG_CM, "Unable to find remote peer address info\n");
-       else
-               record_sockaddr_info(&remote_addr, &cm_node->rem_addr,
-                               &cm_node->rem_port);
-       return ret;
-}
-
 /**
  * print_core - dump a cm core
  */
@@ -1266,11 +1147,10 @@ static struct nes_cm_node *find_node(struct nes_cm_core *cm_core,
                          loc_addr, loc_port,
                          cm_node->rem_addr, cm_node->rem_port,
                          rem_addr, rem_port);
-               if ((cm_node->mapped_loc_addr == loc_addr) &&
-                       (cm_node->mapped_loc_port == loc_port) &&
-                       (cm_node->mapped_rem_addr == rem_addr) &&
-                       (cm_node->mapped_rem_port == rem_port)) {
-
+               if ((cm_node->loc_addr == loc_addr) &&
+                   (cm_node->loc_port == loc_port) &&
+                   (cm_node->rem_addr == rem_addr) &&
+                   (cm_node->rem_port == rem_port)) {
                        add_ref_cm_node(cm_node);
                        spin_unlock_irqrestore(&cm_core->ht_lock, flags);
                        return cm_node;
@@ -1287,8 +1167,8 @@ static struct nes_cm_node *find_node(struct nes_cm_core *cm_core,
  * find_listener - find a cm node listening on this addr-port pair
  */
 static struct nes_cm_listener *find_listener(struct nes_cm_core *cm_core,
-                                       nes_addr_t dst_addr, u16 dst_port,
-                                       enum nes_cm_listener_state listener_state, int local)
+                                            nes_addr_t dst_addr, u16 dst_port,
+                                            enum nes_cm_listener_state listener_state)
 {
        unsigned long flags;
        struct nes_cm_listener *listen_node;
@@ -1298,13 +1178,9 @@ static struct nes_cm_listener *find_listener(struct nes_cm_core *cm_core,
        /* walk list and find cm_node associated with this session ID */
        spin_lock_irqsave(&cm_core->listen_list_lock, flags);
        list_for_each_entry(listen_node, &cm_core->listen_list.list, list) {
-               if (local) {
-                       listen_addr = listen_node->loc_addr;
-                       listen_port = listen_node->loc_port;
-               } else {
-                       listen_addr = listen_node->mapped_loc_addr;
-                       listen_port = listen_node->mapped_loc_port;
-               }
+               listen_addr = listen_node->loc_addr;
+               listen_port = listen_node->loc_port;
+
                /* compare node pair, return node handle if a match */
                if (((listen_addr == dst_addr) ||
                     listen_addr == 0x00000000) &&
@@ -1443,17 +1319,13 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
 
                if (listener->nesvnic) {
                        nes_manage_apbvt(listener->nesvnic,
-                               listener->mapped_loc_port,
+                               listener->loc_port,
                                PCI_FUNC(listener->nesvnic->nesdev->pcidev->devfn),
                                NES_MANAGE_APBVT_DEL);
 
-                       nes_remove_mapinfo(listener->loc_addr,
-                                       listener->loc_port,
-                                       listener->mapped_loc_addr,
-                                       listener->mapped_loc_port);
                        nes_debug(NES_DBG_NLMSG,
-                                       "Delete APBVT mapped_loc_port = %04X\n",
-                                       listener->mapped_loc_port);
+                                       "Delete APBVT loc_port = %04X\n",
+                                       listener->loc_port);
                }
 
                nes_debug(NES_DBG_CM, "destroying listener (%p)\n", listener);
@@ -1602,11 +1474,6 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
        cm_node->rem_addr = cm_info->rem_addr;
        cm_node->rem_port = cm_info->rem_port;
 
-       cm_node->mapped_loc_addr = cm_info->mapped_loc_addr;
-       cm_node->mapped_rem_addr = cm_info->mapped_rem_addr;
-       cm_node->mapped_loc_port = cm_info->mapped_loc_port;
-       cm_node->mapped_rem_port = cm_info->mapped_rem_port;
-
        cm_node->mpa_frame_rev = mpa_version;
        cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
        cm_node->mpav2_ird_ord = 0;
@@ -1655,10 +1522,10 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
        cm_node->loopbackpartner = NULL;
 
        /* get the mac addr for the remote node */
-       oldarpindex = nes_arp_table(nesdev, cm_node->mapped_rem_addr,
-                               NULL, NES_ARP_RESOLVE);
-       arpindex = nes_addr_resolve_neigh(nesvnic,
-                               cm_node->mapped_rem_addr, oldarpindex);
+       oldarpindex = nes_arp_table(nesdev, cm_node->rem_addr,
+                                   NULL, NES_ARP_RESOLVE);
+       arpindex = nes_addr_resolve_neigh(nesvnic, cm_node->rem_addr,
+                                         oldarpindex);
        if (arpindex < 0) {
                kfree(cm_node);
                return NULL;
@@ -1720,14 +1587,12 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
                mini_cm_dec_refcnt_listen(cm_core, cm_node->listener, 0);
        } else {
                if (cm_node->apbvt_set && cm_node->nesvnic) {
-                       nes_manage_apbvt(cm_node->nesvnic, cm_node->mapped_loc_port,
+                       nes_manage_apbvt(cm_node->nesvnic, cm_node->loc_port,
                                         PCI_FUNC(cm_node->nesvnic->nesdev->pcidev->devfn),
                                         NES_MANAGE_APBVT_DEL);
                }
-               nes_debug(NES_DBG_NLMSG, "Delete APBVT mapped_loc_port = %04X\n",
-                                       cm_node->mapped_loc_port);
-               nes_remove_mapinfo(cm_node->loc_addr, cm_node->loc_port,
-                       cm_node->mapped_loc_addr, cm_node->mapped_loc_port);
+               nes_debug(NES_DBG_NLMSG, "Delete APBVT loc_port = %04X\n",
+                         cm_node->loc_port);
        }
 
        atomic_dec(&cm_core->node_cnt);
@@ -2184,7 +2049,6 @@ static int handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
                cm_node->state = NES_CM_STATE_ESTABLISHED;
                if (datasize) {
                        cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
-                       nes_get_remote_addr(cm_node);
                        handle_rcv_mpa(cm_node, skb);
                } else { /* rcvd ACK only */
                        dev_kfree_skb_any(skb);
@@ -2399,17 +2263,14 @@ static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core,
                        struct nes_vnic *nesvnic, struct nes_cm_info *cm_info)
 {
        struct nes_cm_listener *listener;
-       struct iwpm_dev_data pm_reg_msg;
-       struct iwpm_sa_data pm_msg;
        unsigned long flags;
-       int iwpm_err = 0;
 
        nes_debug(NES_DBG_CM, "Search for 0x%08x : 0x%04x\n",
                  cm_info->loc_addr, cm_info->loc_port);
 
        /* cannot have multiple matching listeners */
        listener = find_listener(cm_core, cm_info->loc_addr, cm_info->loc_port,
-                               NES_CM_LISTENER_EITHER_STATE, 1);
+                               NES_CM_LISTENER_EITHER_STATE);
 
        if (listener && listener->listener_state == NES_CM_LISTENER_ACTIVE_STATE) {
                /* find automatically incs ref count ??? */
@@ -2419,22 +2280,6 @@ static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core,
        }
 
        if (!listener) {
-               nes_form_reg_msg(nesvnic, &pm_reg_msg);
-               iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_NES);
-               if (iwpm_err) {
-                       nes_debug(NES_DBG_NLMSG,
-                       "Port Mapper reg pid fail (err = %d).\n", iwpm_err);
-               }
-               if (iwpm_valid_pid() && !iwpm_err) {
-                       nes_form_pm_msg(cm_info, &pm_msg);
-                       iwpm_err = iwpm_add_mapping(&pm_msg, RDMA_NL_NES);
-                       if (iwpm_err)
-                               nes_debug(NES_DBG_NLMSG,
-                               "Port Mapper query fail (err = %d).\n", iwpm_err);
-                       else
-                               nes_record_pm_msg(cm_info, &pm_msg);
-               }
-
                /* create a CM listen node (1/2 node to compare incoming traffic to) */
                listener = kzalloc(sizeof(*listener), GFP_ATOMIC);
                if (!listener) {
@@ -2444,8 +2289,6 @@ static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *cm_core,
 
                listener->loc_addr = cm_info->loc_addr;
                listener->loc_port = cm_info->loc_port;
-               listener->mapped_loc_addr = cm_info->mapped_loc_addr;
-               listener->mapped_loc_port = cm_info->mapped_loc_port;
                listener->reused_node = 0;
 
                atomic_set(&listener->ref_count, 1);
@@ -2507,18 +2350,18 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
 
        if (cm_info->loc_addr == cm_info->rem_addr) {
                loopbackremotelistener = find_listener(cm_core,
-                       cm_node->mapped_loc_addr, cm_node->mapped_rem_port,
-                       NES_CM_LISTENER_ACTIVE_STATE, 0);
+                       cm_node->loc_addr, cm_node->rem_port,
+                       NES_CM_LISTENER_ACTIVE_STATE);
                if (loopbackremotelistener == NULL) {
                        create_event(cm_node, NES_CM_EVENT_ABORTED);
                } else {
                        loopback_cm_info = *cm_info;
                        loopback_cm_info.loc_port = cm_info->rem_port;
                        loopback_cm_info.rem_port = cm_info->loc_port;
-                       loopback_cm_info.mapped_loc_port =
-                               cm_info->mapped_rem_port;
-                       loopback_cm_info.mapped_rem_port =
-                               cm_info->mapped_loc_port;
+                       loopback_cm_info.loc_port =
+                               cm_info->rem_port;
+                       loopback_cm_info.rem_port =
+                               cm_info->loc_port;
                        loopback_cm_info.cm_id = loopbackremotelistener->cm_id;
                        loopbackremotenode = make_cm_node(cm_core, nesvnic,
                                                          &loopback_cm_info, loopbackremotelistener);
@@ -2747,12 +2590,6 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
        nfo.rem_addr = ntohl(iph->saddr);
        nfo.rem_port = ntohs(tcph->source);
 
-       /* If port mapper is available these should be mapped address info */
-       nfo.mapped_loc_addr = ntohl(iph->daddr);
-       nfo.mapped_loc_port = ntohs(tcph->dest);
-       nfo.mapped_rem_addr = ntohl(iph->saddr);
-       nfo.mapped_rem_port = ntohs(tcph->source);
-
        tmp_daddr = cpu_to_be32(iph->daddr);
        tmp_saddr = cpu_to_be32(iph->saddr);
 
@@ -2761,8 +2598,8 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
 
        do {
                cm_node = find_node(cm_core,
-                                   nfo.mapped_rem_port, nfo.mapped_rem_addr,
-                                   nfo.mapped_loc_port, nfo.mapped_loc_addr);
+                                   nfo.rem_port, nfo.rem_addr,
+                                   nfo.loc_port, nfo.loc_addr);
 
                if (!cm_node) {
                        /* Only type of packet accepted are for */
@@ -2771,9 +2608,9 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
                                skb_handled = 0;
                                break;
                        }
-                       listener = find_listener(cm_core, nfo.mapped_loc_addr,
-                                       nfo.mapped_loc_port,
-                                       NES_CM_LISTENER_ACTIVE_STATE, 0);
+                       listener = find_listener(cm_core, nfo.loc_addr,
+                                                nfo.loc_port,
+                                                NES_CM_LISTENER_ACTIVE_STATE);
                        if (!listener) {
                                nfo.cm_id = NULL;
                                nfo.conn_type = 0;
@@ -2856,12 +2693,22 @@ static struct nes_cm_core *nes_cm_alloc_core(void)
 
        nes_debug(NES_DBG_CM, "Enable QUEUE EVENTS\n");
        cm_core->event_wq = create_singlethread_workqueue("nesewq");
+       if (!cm_core->event_wq)
+               goto out_free_cmcore;
        cm_core->post_event = nes_cm_post_event;
        nes_debug(NES_DBG_CM, "Enable QUEUE DISCONNECTS\n");
        cm_core->disconn_wq = create_singlethread_workqueue("nesdwq");
+       if (!cm_core->disconn_wq)
+               goto out_free_wq;
 
        print_core(cm_core);
        return cm_core;
+
+out_free_wq:
+       destroy_workqueue(cm_core->event_wq);
+out_free_cmcore:
+       kfree(cm_core);
+       return NULL;
 }
 
 
@@ -3121,8 +2968,8 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
                        atomic_inc(&cm_disconnects);
                        cm_event.event = IW_CM_EVENT_DISCONNECT;
                        cm_event.status = disconn_status;
-                       cm_event.local_addr = cm_id->local_addr;
-                       cm_event.remote_addr = cm_id->remote_addr;
+                       cm_event.local_addr = cm_id->m_local_addr;
+                       cm_event.remote_addr = cm_id->m_remote_addr;
                        cm_event.private_data = NULL;
                        cm_event.private_data_len = 0;
 
@@ -3148,8 +2995,8 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
                        cm_event.event = IW_CM_EVENT_CLOSE;
                        cm_event.status = 0;
                        cm_event.provider_data = cm_id->provider_data;
-                       cm_event.local_addr = cm_id->local_addr;
-                       cm_event.remote_addr = cm_id->remote_addr;
+                       cm_event.local_addr = cm_id->m_local_addr;
+                       cm_event.remote_addr = cm_id->m_remote_addr;
                        cm_event.private_data = NULL;
                        cm_event.private_data_len = 0;
 
@@ -3240,8 +3087,8 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        u8 *start_ptr = &start_addr;
        u8 **start_buff = &start_ptr;
        u16 buff_len = 0;
-       struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
-       struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
+       struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+       struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
 
        ibqp = nes_get_qp(cm_id->device, conn_param->qpn);
        if (!ibqp)
@@ -3378,11 +3225,11 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        nes_cm_init_tsa_conn(nesqp, cm_node);
 
        nesqp->nesqp_context->tcpPorts[0] =
-                               cpu_to_le16(cm_node->mapped_loc_port);
+                               cpu_to_le16(cm_node->loc_port);
        nesqp->nesqp_context->tcpPorts[1] =
-                               cpu_to_le16(cm_node->mapped_rem_port);
+                               cpu_to_le16(cm_node->rem_port);
 
-       nesqp->nesqp_context->ip0 = cpu_to_le32(cm_node->mapped_rem_addr);
+       nesqp->nesqp_context->ip0 = cpu_to_le32(cm_node->rem_addr);
 
        nesqp->nesqp_context->misc2 |= cpu_to_le32(
                (u32)PCI_FUNC(nesdev->pcidev->devfn) <<
@@ -3406,9 +3253,9 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        memset(&nes_quad, 0, sizeof(nes_quad));
        nes_quad.DstIpAdrIndex =
                cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24);
-       nes_quad.SrcIpadr = htonl(cm_node->mapped_rem_addr);
-       nes_quad.TcpPorts[0] = htons(cm_node->mapped_rem_port);
-       nes_quad.TcpPorts[1] = htons(cm_node->mapped_loc_port);
+       nes_quad.SrcIpadr = htonl(cm_node->rem_addr);
+       nes_quad.TcpPorts[0] = htons(cm_node->rem_port);
+       nes_quad.TcpPorts[1] = htons(cm_node->loc_port);
 
        /* Produce hash key */
        crc_value = get_crc_value(&nes_quad);
@@ -3437,8 +3284,8 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        cm_event.event = IW_CM_EVENT_ESTABLISHED;
        cm_event.status = 0;
        cm_event.provider_data = (void *)nesqp;
-       cm_event.local_addr = cm_id->local_addr;
-       cm_event.remote_addr = cm_id->remote_addr;
+       cm_event.local_addr = cm_id->m_local_addr;
+       cm_event.remote_addr = cm_id->m_remote_addr;
        cm_event.private_data = NULL;
        cm_event.private_data_len = 0;
        cm_event.ird = cm_node->ird_size;
@@ -3508,11 +3355,8 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        struct nes_cm_node *cm_node;
        struct nes_cm_info cm_info;
        int apbvt_set = 0;
-       struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
-       struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
-       struct iwpm_dev_data pm_reg_msg;
-       struct iwpm_sa_data pm_msg;
-       int iwpm_err = 0;
+       struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+       struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
 
        if (cm_id->remote_addr.ss_family != AF_INET)
                return -ENOSYS;
@@ -3558,37 +3402,13 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        cm_info.cm_id = cm_id;
        cm_info.conn_type = NES_CM_IWARP_CONN_TYPE;
 
-       /* No port mapper available, go with the specified peer information */
-       cm_info.mapped_loc_addr = cm_info.loc_addr;
-       cm_info.mapped_loc_port = cm_info.loc_port;
-       cm_info.mapped_rem_addr = cm_info.rem_addr;
-       cm_info.mapped_rem_port = cm_info.rem_port;
-
-       nes_form_reg_msg(nesvnic, &pm_reg_msg);
-       iwpm_err = iwpm_register_pid(&pm_reg_msg, RDMA_NL_NES);
-       if (iwpm_err) {
-               nes_debug(NES_DBG_NLMSG,
-                       "Port Mapper reg pid fail (err = %d).\n", iwpm_err);
-       }
-       if (iwpm_valid_pid() && !iwpm_err) {
-               nes_form_pm_msg(&cm_info, &pm_msg);
-               iwpm_err = iwpm_add_and_query_mapping(&pm_msg, RDMA_NL_NES);
-               if (iwpm_err)
-                       nes_debug(NES_DBG_NLMSG,
-                       "Port Mapper query fail (err = %d).\n", iwpm_err);
-               else
-                       nes_record_pm_msg(&cm_info, &pm_msg);
-       }
-
        if (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr) {
-               nes_manage_apbvt(nesvnic, cm_info.mapped_loc_port,
-                       PCI_FUNC(nesdev->pcidev->devfn), NES_MANAGE_APBVT_ADD);
+               nes_manage_apbvt(nesvnic, cm_info.loc_port,
+                                PCI_FUNC(nesdev->pcidev->devfn),
+                                NES_MANAGE_APBVT_ADD);
                apbvt_set = 1;
        }
 
-       if (nes_create_mapinfo(&cm_info))
-               return -ENOMEM;
-
        cm_id->add_ref(cm_id);
 
        /* create a connect CM node connection */
@@ -3597,14 +3417,12 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
                                          &cm_info);
        if (!cm_node) {
                if (apbvt_set)
-                       nes_manage_apbvt(nesvnic, cm_info.mapped_loc_port,
+                       nes_manage_apbvt(nesvnic, cm_info.loc_port,
                                         PCI_FUNC(nesdev->pcidev->devfn),
                                         NES_MANAGE_APBVT_DEL);
 
-               nes_debug(NES_DBG_NLMSG, "Delete mapped_loc_port = %04X\n",
-                               cm_info.mapped_loc_port);
-               nes_remove_mapinfo(cm_info.loc_addr, cm_info.loc_port,
-                       cm_info.mapped_loc_addr, cm_info.mapped_loc_port);
+               nes_debug(NES_DBG_NLMSG, "Delete loc_port = %04X\n",
+                         cm_info.loc_port);
                cm_id->rem_ref(cm_id);
                return -ENOMEM;
        }
@@ -3633,12 +3451,12 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
        struct nes_cm_listener *cm_node;
        struct nes_cm_info cm_info;
        int err;
-       struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
+       struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
 
        nes_debug(NES_DBG_CM, "cm_id = %p, local port = 0x%04X.\n",
                  cm_id, ntohs(laddr->sin_port));
 
-       if (cm_id->local_addr.ss_family != AF_INET)
+       if (cm_id->m_local_addr.ss_family != AF_INET)
                return -ENOSYS;
        nesvnic = to_nesvnic(cm_id->device);
        if (!nesvnic)
@@ -3658,10 +3476,6 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
 
        cm_info.conn_type = NES_CM_IWARP_CONN_TYPE;
 
-       /* No port mapper available, go with the specified info */
-       cm_info.mapped_loc_addr = cm_info.loc_addr;
-       cm_info.mapped_loc_port = cm_info.loc_port;
-
        cm_node = g_cm_core->api->listen(g_cm_core, nesvnic, &cm_info);
        if (!cm_node) {
                printk(KERN_ERR "%s[%u] Error returned from listen API call\n",
@@ -3673,10 +3487,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
        cm_node->tos = cm_id->tos;
 
        if (!cm_node->reused_node) {
-               if (nes_create_mapinfo(&cm_info))
-                       return -ENOMEM;
-
-               err = nes_manage_apbvt(nesvnic, cm_node->mapped_loc_port,
+               err = nes_manage_apbvt(nesvnic, cm_node->loc_port,
                                       PCI_FUNC(nesvnic->nesdev->pcidev->devfn),
                                       NES_MANAGE_APBVT_ADD);
                if (err) {
@@ -3786,8 +3597,8 @@ static void cm_event_connected(struct nes_cm_event *event)
        nesvnic = to_nesvnic(nesqp->ibqp.device);
        nesdev = nesvnic->nesdev;
        nesadapter = nesdev->nesadapter;
-       laddr = (struct sockaddr_in *)&cm_id->local_addr;
-       raddr = (struct sockaddr_in *)&cm_id->remote_addr;
+       laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
+       raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
        cm_event_laddr = (struct sockaddr_in *)&cm_event.local_addr;
 
        if (nesqp->destroyed)
@@ -3802,10 +3613,10 @@ static void cm_event_connected(struct nes_cm_event *event)
 
        /* set the QP tsa context */
        nesqp->nesqp_context->tcpPorts[0] =
-                       cpu_to_le16(cm_node->mapped_loc_port);
+                       cpu_to_le16(cm_node->loc_port);
        nesqp->nesqp_context->tcpPorts[1] =
-                       cpu_to_le16(cm_node->mapped_rem_port);
-       nesqp->nesqp_context->ip0 = cpu_to_le32(cm_node->mapped_rem_addr);
+                       cpu_to_le16(cm_node->rem_port);
+       nesqp->nesqp_context->ip0 = cpu_to_le32(cm_node->rem_addr);
 
        nesqp->nesqp_context->misc2 |= cpu_to_le32(
                        (u32)PCI_FUNC(nesdev->pcidev->devfn) <<
@@ -3835,9 +3646,9 @@ static void cm_event_connected(struct nes_cm_event *event)
 
        nes_quad.DstIpAdrIndex =
                cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24);
-       nes_quad.SrcIpadr = htonl(cm_node->mapped_rem_addr);
-       nes_quad.TcpPorts[0] = htons(cm_node->mapped_rem_port);
-       nes_quad.TcpPorts[1] = htons(cm_node->mapped_loc_port);
+       nes_quad.SrcIpadr = htonl(cm_node->rem_addr);
+       nes_quad.TcpPorts[0] = htons(cm_node->rem_port);
+       nes_quad.TcpPorts[1] = htons(cm_node->loc_port);
 
        /* Produce hash key */
        crc_value = get_crc_value(&nes_quad);
@@ -3858,14 +3669,14 @@ static void cm_event_connected(struct nes_cm_event *event)
        cm_event.provider_data = cm_id->provider_data;
        cm_event_laddr->sin_family = AF_INET;
        cm_event_laddr->sin_port = laddr->sin_port;
-       cm_event.remote_addr = cm_id->remote_addr;
+       cm_event.remote_addr = cm_id->m_remote_addr;
 
        cm_event.private_data = (void *)event->cm_node->mpa_frame_buf;
        cm_event.private_data_len = (u8)event->cm_node->mpa_frame_size;
        cm_event.ird = cm_node->ird_size;
        cm_event.ord = cm_node->ord_size;
 
-       cm_event_laddr->sin_addr.s_addr = htonl(event->cm_info.rem_addr);
+       cm_event_laddr->sin_addr.s_addr = htonl(event->cm_info.loc_addr);
        ret = cm_id->event_handler(cm_id, &cm_event);
        nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret);
 
@@ -3913,8 +3724,8 @@ static void cm_event_connect_error(struct nes_cm_event *event)
        cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
        cm_event.status = -ECONNRESET;
        cm_event.provider_data = cm_id->provider_data;
-       cm_event.local_addr = cm_id->local_addr;
-       cm_event.remote_addr = cm_id->remote_addr;
+       cm_event.local_addr = cm_id->m_local_addr;
+       cm_event.remote_addr = cm_id->m_remote_addr;
        cm_event.private_data = NULL;
        cm_event.private_data_len = 0;
 
@@ -3970,8 +3781,8 @@ static void cm_event_reset(struct nes_cm_event *event)
        cm_event.event = IW_CM_EVENT_DISCONNECT;
        cm_event.status = -ECONNRESET;
        cm_event.provider_data = cm_id->provider_data;
-       cm_event.local_addr = cm_id->local_addr;
-       cm_event.remote_addr = cm_id->remote_addr;
+       cm_event.local_addr = cm_id->m_local_addr;
+       cm_event.remote_addr = cm_id->m_remote_addr;
        cm_event.private_data = NULL;
        cm_event.private_data_len = 0;
 
@@ -3981,8 +3792,8 @@ static void cm_event_reset(struct nes_cm_event *event)
        cm_event.event = IW_CM_EVENT_CLOSE;
        cm_event.status = 0;
        cm_event.provider_data = cm_id->provider_data;
-       cm_event.local_addr = cm_id->local_addr;
-       cm_event.remote_addr = cm_id->remote_addr;
+       cm_event.local_addr = cm_id->m_local_addr;
+       cm_event.remote_addr = cm_id->m_remote_addr;
        cm_event.private_data = NULL;
        cm_event.private_data_len = 0;
        nes_debug(NES_DBG_CM, "NODE %p Generating CLOSE\n", event->cm_node);
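
The cm_id->local_addr/remote_addr to cm_id->m_local_addr/m_remote_addr
switches above reflect the port-mapper handling moving out of nes and into
the iWARP CM core: the core now keeps both the application-visible and the
mapped addresses on the cm_id, and the driver uses the mapped (m_*) pair
for its on-wire state. A simplified, illustrative fragment of the relevant
fields (not the full structure definition):

    struct iw_cm_id {
            /* ... other members elided ... */
            struct sockaddr_storage local_addr;    /* application-visible  */
            struct sockaddr_storage remote_addr;
            struct sockaddr_storage m_local_addr;  /* port-mapper mapped,  */
            struct sockaddr_storage m_remote_addr; /* used for wire state  */
    };
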
index 147c2c884227d82ad3f40cec561a91abee71f80a..d827d03e3941209cb78c61d20b838e8dd9dd2c29 100644 (file)
@@ -293,8 +293,8 @@ struct nes_cm_listener {
        struct list_head           list;
        struct nes_cm_core         *cm_core;
        u8                         loc_mac[ETH_ALEN];
-       nes_addr_t                 loc_addr, mapped_loc_addr;
-       u16                        loc_port, mapped_loc_port;
+       nes_addr_t                 loc_addr;
+       u16                        loc_port;
        struct iw_cm_id            *cm_id;
        enum nes_cm_conn_type      conn_type;
        atomic_t                   ref_count;
@@ -309,9 +309,7 @@ struct nes_cm_listener {
 /* per connection node and node state information */
 struct nes_cm_node {
        nes_addr_t                loc_addr, rem_addr;
-       nes_addr_t                mapped_loc_addr, mapped_rem_addr;
        u16                       loc_port, rem_port;
-       u16                       mapped_loc_port, mapped_rem_port;
 
        u8                        loc_mac[ETH_ALEN];
        u8                        rem_mac[ETH_ALEN];
@@ -368,11 +366,6 @@ struct nes_cm_info {
        u16 rem_port;
        nes_addr_t loc_addr;
        nes_addr_t rem_addr;
-       u16 mapped_loc_port;
-       u16 mapped_rem_port;
-       nes_addr_t mapped_loc_addr;
-       nes_addr_t mapped_rem_addr;
-
        enum nes_cm_conn_type  conn_type;
        int backlog;
 };
index 4713dd7ed76432b6d11b042e6dfd24c268f82325..a1c6481d8038b5611736da10ee8ee0e30e05928f 100644 (file)
 #include <linux/moduleparam.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
 #include <linux/if_vlan.h>
-#include <linux/inet_lro.h>
 #include <linux/slab.h>
 
 #include "nes.h"
 
-static unsigned int nes_lro_max_aggr = NES_LRO_MAX_AGGR;
-module_param(nes_lro_max_aggr, uint, 0444);
-MODULE_PARM_DESC(nes_lro_max_aggr, "NIC LRO max packet aggregation");
-
 static int wide_ppm_offset;
 module_param(wide_ppm_offset, int, 0644);
 MODULE_PARM_DESC(wide_ppm_offset, "Increase CX4 interface clock ppm offset, 0=100ppm (default), 1=300ppm");
@@ -1642,25 +1635,6 @@ static void nes_rq_wqes_timeout(unsigned long parm)
 }
 
 
-static int nes_lro_get_skb_hdr(struct sk_buff *skb, void **iphdr,
-                              void **tcph, u64 *hdr_flags, void *priv)
-{
-       unsigned int ip_len;
-       struct iphdr *iph;
-       skb_reset_network_header(skb);
-       iph = ip_hdr(skb);
-       if (iph->protocol != IPPROTO_TCP)
-               return -1;
-       ip_len = ip_hdrlen(skb);
-       skb_set_transport_header(skb, ip_len);
-       *tcph = tcp_hdr(skb);
-
-       *hdr_flags = LRO_IPV4 | LRO_TCP;
-       *iphdr = iph;
-       return 0;
-}
-
-
 /**
  * nes_init_nic_qp
  */
@@ -1895,14 +1869,6 @@ int nes_init_nic_qp(struct nes_device *nesdev, struct net_device *netdev)
                return -ENOMEM;
        }
 
-       nesvnic->lro_mgr.max_aggr       = nes_lro_max_aggr;
-       nesvnic->lro_mgr.max_desc       = NES_MAX_LRO_DESCRIPTORS;
-       nesvnic->lro_mgr.lro_arr        = nesvnic->lro_desc;
-       nesvnic->lro_mgr.get_skb_header = nes_lro_get_skb_hdr;
-       nesvnic->lro_mgr.features       = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
-       nesvnic->lro_mgr.dev            = netdev;
-       nesvnic->lro_mgr.ip_summed      = CHECKSUM_UNNECESSARY;
-       nesvnic->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
        return 0;
 }
 
@@ -2809,13 +2775,10 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
        u16 pkt_type;
        u16 rqes_processed = 0;
        u8 sq_cqes = 0;
-       u8 nes_use_lro = 0;
 
        head = cq->cq_head;
        cq_size = cq->cq_size;
        cq->cqes_pending = 1;
-       if (nesvnic->netdev->features & NETIF_F_LRO)
-               nes_use_lro = 1;
        do {
                if (le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]) &
                                NES_NIC_CQE_VALID) {
@@ -2950,10 +2913,7 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
 
                                        __vlan_hwaccel_put_tag(rx_skb, htons(ETH_P_8021Q), vlan_tag);
                                }
-                               if (nes_use_lro)
-                                       lro_receive_skb(&nesvnic->lro_mgr, rx_skb, NULL);
-                               else
-                                       netif_receive_skb(rx_skb);
+                               napi_gro_receive(&nesvnic->napi, rx_skb);
 
 skip_rx_indicate0:
                                ;
@@ -2984,8 +2944,6 @@ skip_rx_indicate0:
 
        } while (1);
 
-       if (nes_use_lro)
-               lro_flush_all(&nesvnic->lro_mgr);
        if (sq_cqes) {
                barrier();
                /* restart the queue if it had been stopped */
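
The LRO removal above switches the nes receive path to napi_gro_receive(),
which needs no per-driver descriptor setup. For orientation, a minimal
sketch of a NAPI poll handler feeding GRO; demo_poll() and
demo_next_rx_skb() are stand-ins for the driver's completion processing,
not nes functions:

    static struct sk_buff *demo_next_rx_skb(void);

    static int demo_poll(struct napi_struct *napi, int budget)
    {
            struct sk_buff *skb;
            int done = 0;

            while (done < budget && (skb = demo_next_rx_skb()) != NULL) {
                    napi_gro_receive(napi, skb);  /* replaces lro_receive_skb() */
                    done++;
            }
            if (done < budget)
                    napi_complete_done(napi, done);
            return done;
    }
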
index c9080208aad2ec4b022f915407d61479de40c41f..1b66ef1e9937133d98a0e41710790885e2160b08 100644 (file)
@@ -33,8 +33,6 @@
 #ifndef __NES_HW_H
 #define __NES_HW_H
 
-#include <linux/inet_lro.h>
-
 #define NES_PHY_TYPE_CX4       1
 #define NES_PHY_TYPE_1G        2
 #define NES_PHY_TYPE_ARGUS     4
@@ -1049,8 +1047,6 @@ struct nes_hw_tune_timer {
 #define NES_TIMER_ENABLE_LIMIT      4
 #define NES_MAX_LINK_INTERRUPTS     128
 #define NES_MAX_LINK_CHECK          200
-#define NES_MAX_LRO_DESCRIPTORS     32
-#define NES_LRO_MAX_AGGR            64
 
 struct nes_adapter {
        u64              fw_ver;
@@ -1263,9 +1259,6 @@ struct nes_vnic {
        u8  next_qp_nic_index;
        u8  of_device_registered;
        u8  rdma_enabled;
-       u32 lro_max_aggr;
-       struct net_lro_mgr lro_mgr;
-       struct net_lro_desc lro_desc[NES_MAX_LRO_DESCRIPTORS];
        struct timer_list event_timer;
        enum ib_event_type delayed_event;
        enum ib_event_type last_dispatched_event;
index 6a0bdfa0ce2e76f4741454a740426e0b891015bc..3ea9e055fdd37f27be5f8accad68aca74caa3780 100644 (file)
@@ -1085,9 +1085,6 @@ static const char nes_ethtool_stringset[][ETH_GSTRING_LEN] = {
        "Free 4Kpbls",
        "Free 256pbls",
        "Timer Inits",
-       "LRO aggregated",
-       "LRO flushed",
-       "LRO no_desc",
        "PAU CreateQPs",
        "PAU DestroyQPs",
 };
@@ -1302,9 +1299,6 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
        target_stat_values[++index] = nesadapter->free_4kpbl;
        target_stat_values[++index] = nesadapter->free_256pbl;
        target_stat_values[++index] = int_mod_timer_init;
-       target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
-       target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
-       target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
        target_stat_values[++index] = atomic_read(&pau_qps_created);
        target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
 }
@@ -1709,7 +1703,6 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
                netdev->hw_features |= NETIF_F_TSO;
 
        netdev->features = netdev->hw_features | NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX;
-       netdev->hw_features |= NETIF_F_LRO;
 
        nes_debug(NES_DBG_INIT, "nesvnic = %p, reported features = 0x%lX, QPid = %d,"
                        " nic_index = %d, logical_port = %d, mac_index = %d.\n",
index 8c4daf7f22ec14fc5a56b13f5ddeca9c09a142e7..fba69a39a7eb496cb80ae819c231917e48203c17 100644 (file)
@@ -56,7 +56,8 @@ static int nes_dereg_mr(struct ib_mr *ib_mr);
 /**
  * nes_alloc_mw
  */
-static struct ib_mw *nes_alloc_mw(struct ib_pd *ibpd, enum ib_mw_type type)
+static struct ib_mw *nes_alloc_mw(struct ib_pd *ibpd, enum ib_mw_type type,
+                                 struct ib_udata *udata)
 {
        struct nes_pd *nespd = to_nespd(ibpd);
        struct nes_vnic *nesvnic = to_nesvnic(ibpd->device);
@@ -3768,6 +3769,8 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev)
        nesibdev->ibdev.iwcm->create_listen = nes_create_listen;
        nesibdev->ibdev.iwcm->destroy_listen = nes_destroy_listen;
        nesibdev->ibdev.get_port_immutable   = nes_port_immutable;
+       memcpy(nesibdev->ibdev.iwcm->ifname, netdev->name,
+              sizeof(nesibdev->ibdev.iwcm->ifname));
 
        return nesibdev;
 }
index 12503f15fbd6b29830dee04d81f8fd4082319be2..45bdfa0e3b2ba988438ea1246353761e6c1fd540 100644 (file)
@@ -114,6 +114,7 @@ struct ocrdma_dev_attr {
        u8 local_ca_ack_delay;
        u8 ird;
        u8 num_ird_pages;
+       u8 udp_encap;
 };
 
 struct ocrdma_dma_mem {
@@ -356,6 +357,7 @@ struct ocrdma_ah {
        struct ocrdma_av *av;
        u16 sgid_index;
        u32 id;
+       u8 hdr_type;
 };
 
 struct ocrdma_qp_hwq_info {
@@ -598,4 +600,10 @@ static inline u8 ocrdma_get_ae_link_state(u32 ae_state)
        return ((ae_state & OCRDMA_AE_LSC_LS_MASK) >> OCRDMA_AE_LSC_LS_SHIFT);
 }
 
+static inline bool ocrdma_is_udp_encap_supported(struct ocrdma_dev *dev)
+{
+       return (dev->attr.udp_encap & OCRDMA_L3_TYPE_IPV4) ||
+              (dev->attr.udp_encap & OCRDMA_L3_TYPE_IPV6);
+}
+
 #endif
index 3790771f2baad2bae17c52298e9b293eee3e7bad..797362a297b23aba74e1eac2ea0349a1ed57d023 100644 (file)
 
 #define OCRDMA_VID_PCP_SHIFT   0xD
 
+static u16 ocrdma_hdr_type_to_proto_num(int devid, u8 hdr_type)
+{
+       switch (hdr_type) {
+       case OCRDMA_L3_TYPE_IB_GRH:
+               return (u16)0x8915;
+       case OCRDMA_L3_TYPE_IPV4:
+               return (u16)0x0800;
+       case OCRDMA_L3_TYPE_IPV6:
+               return (u16)0x86dd;
+       default:
+               pr_err("ocrdma%d: Invalid network header\n", devid);
+               return 0;
+       }
+}
+
 static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
                        struct ib_ah_attr *attr, union ib_gid *sgid,
                        int pdid, bool *isvlan, u16 vlan_tag)
 {
-       int status = 0;
+       int status;
        struct ocrdma_eth_vlan eth;
        struct ocrdma_grh grh;
        int eth_sz;
+       u16 proto_num = 0;
+       u8 nxthdr = 0x11;
+       struct iphdr ipv4;
+       union {
+               struct sockaddr     _sockaddr;
+               struct sockaddr_in  _sockaddr_in;
+               struct sockaddr_in6 _sockaddr_in6;
+       } sgid_addr, dgid_addr;
 
        memset(&eth, 0, sizeof(eth));
        memset(&grh, 0, sizeof(grh));
 
+       /* Protocol Number */
+       proto_num = ocrdma_hdr_type_to_proto_num(dev->id, ah->hdr_type);
+       if (!proto_num)
+               return -EINVAL;
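+       /* RoCE v1 GRH carries IB next-header 0x1b; RoCE v2 uses UDP (IP protocol 0x11) */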
+       nxthdr = (proto_num == 0x8915) ? 0x1b : 0x11;
        /* VLAN */
        if (!vlan_tag || (vlan_tag > 0xFFF))
                vlan_tag = dev->pvid;
@@ -78,13 +106,13 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
                                dev->id);
                }
                eth.eth_type = cpu_to_be16(0x8100);
-               eth.roce_eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);
+               eth.roce_eth_type = cpu_to_be16(proto_num);
                vlan_tag |= (dev->sl & 0x07) << OCRDMA_VID_PCP_SHIFT;
                eth.vlan_tag = cpu_to_be16(vlan_tag);
                eth_sz = sizeof(struct ocrdma_eth_vlan);
                *isvlan = true;
        } else {
-               eth.eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);
+               eth.eth_type = cpu_to_be16(proto_num);
                eth_sz = sizeof(struct ocrdma_eth_basic);
        }
        /* MAC */
@@ -93,18 +121,33 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
        if (status)
                return status;
        ah->sgid_index = attr->grh.sgid_index;
-       memcpy(&grh.sgid[0], sgid->raw, sizeof(union ib_gid));
-       memcpy(&grh.dgid[0], attr->grh.dgid.raw, sizeof(attr->grh.dgid.raw));
-
-       grh.tclass_flow = cpu_to_be32((6 << 28) |
-                       (attr->grh.traffic_class << 24) |
-                       attr->grh.flow_label);
-       /* 0x1b is next header value in GRH */
-       grh.pdid_hoplimit = cpu_to_be32((pdid << 16) |
-                       (0x1b << 8) | attr->grh.hop_limit);
        /* Eth HDR */
        memcpy(&ah->av->eth_hdr, &eth, eth_sz);
-       memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh));
+       if (ah->hdr_type == RDMA_NETWORK_IPV4) {
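+               /* First 16 bits of the IPv4 header: version 4, IHL 5 (20 bytes), then the ToS byte */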
+               *((__be16 *)&ipv4) = htons((4 << 12) | (5 << 8) |
+                                          attr->grh.traffic_class);
+               ipv4.id = cpu_to_be16(pdid);
+               ipv4.frag_off = htons(IP_DF);
+               ipv4.tot_len = htons(0);
+               ipv4.ttl = attr->grh.hop_limit;
+               ipv4.protocol = nxthdr;
+               rdma_gid2ip(&sgid_addr._sockaddr, sgid);
+               ipv4.saddr = sgid_addr._sockaddr_in.sin_addr.s_addr;
+               rdma_gid2ip(&dgid_addr._sockaddr, &attr->grh.dgid);
+               ipv4.daddr = dgid_addr._sockaddr_in.sin_addr.s_addr;
+               memcpy((u8 *)ah->av + eth_sz, &ipv4, sizeof(struct iphdr));
+       } else {
+               memcpy(&grh.sgid[0], sgid->raw, sizeof(union ib_gid));
+               grh.tclass_flow = cpu_to_be32((6 << 28) |
+                                             (attr->grh.traffic_class << 24) |
+                                             attr->grh.flow_label);
+               memcpy(&grh.dgid[0], attr->grh.dgid.raw,
+                      sizeof(attr->grh.dgid.raw));
+               grh.pdid_hoplimit = cpu_to_be32((pdid << 16) |
+                                               (nxthdr << 8) |
+                                               attr->grh.hop_limit);
+               memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh));
+       }
        if (*isvlan)
                ah->av->valid |= OCRDMA_AV_VLAN_VALID;
        ah->av->valid = cpu_to_le32(ah->av->valid);
@@ -128,6 +171,7 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
 
        if (atomic_cmpxchg(&dev->update_sl, 1, 0))
                ocrdma_init_service_level(dev);
+
        ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
        if (!ah)
                return ERR_PTR(-ENOMEM);
@@ -148,6 +192,8 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
                        vlan_tag = vlan_dev_vlan_id(sgid_attr.ndev);
                dev_put(sgid_attr.ndev);
        }
+       /* Get network header type for this GID */
+       ah->hdr_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
 
        if ((pd->uctx) &&
            (!rdma_is_multicast_addr((struct in6_addr *)attr->grh.dgid.raw)) &&
@@ -172,6 +218,11 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
                ahid_addr = pd->uctx->ah_tbl.va + attr->dlid;
                *ahid_addr = 0;
                *ahid_addr |= ah->id & OCRDMA_AH_ID_MASK;
+               if (ocrdma_is_udp_encap_supported(dev)) {
+                       *ahid_addr |= ((u32)ah->hdr_type &
+                                      OCRDMA_AH_L3_TYPE_MASK) <<
+                                      OCRDMA_AH_L3_TYPE_SHIFT;
+               }
                if (isvlan)
                        *ahid_addr |= (OCRDMA_AH_VLAN_VALID_MASK <<
                                       OCRDMA_AH_VLAN_VALID_SHIFT);
index 04a30ae674739b87dc266194f68aa27271df653e..3856dd4c7e3d6baa8243cc451338a4bfcf3d967e 100644
 enum {
        OCRDMA_AH_ID_MASK               = 0x3FF,
        OCRDMA_AH_VLAN_VALID_MASK       = 0x01,
-       OCRDMA_AH_VLAN_VALID_SHIFT      = 0x1F
+       OCRDMA_AH_VLAN_VALID_SHIFT      = 0x1F,
+       OCRDMA_AH_L3_TYPE_MASK          = 0x03,
+       OCRDMA_AH_L3_TYPE_SHIFT         = 0x1D /* bit 29 */
 };
-
 struct ib_ah *ocrdma_create_ah(struct ib_pd *, struct ib_ah_attr *);
 int ocrdma_destroy_ah(struct ib_ah *);
 int ocrdma_query_ah(struct ib_ah *, struct ib_ah_attr *);
index 283ca842ff7498b308fcec85a1268dcb452d56b0..16740dcb876bd4f93e2c4954d61aa51ba8579343 100644
@@ -1113,7 +1113,7 @@ mbx_err:
 static int ocrdma_nonemb_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe,
                                 void *payload_va)
 {
-       int status = 0;
+       int status;
        struct ocrdma_mbx_rsp *rsp = payload_va;
 
        if ((mqe->hdr.spcl_sge_cnt_emb & OCRDMA_MQE_HDR_EMB_MASK) >>
@@ -1144,6 +1144,9 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
        attr->max_pd =
            (rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >>
            OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT;
+       attr->udp_encap = (rsp->max_pd_ca_ack_delay &
+                          OCRDMA_MBX_QUERY_CFG_L3_TYPE_MASK) >>
+                          OCRDMA_MBX_QUERY_CFG_L3_TYPE_SHIFT;
        attr->max_dpp_pds =
           (rsp->max_dpp_pds_credits & OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_MASK) >>
            OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_OFFSET;
@@ -2138,7 +2141,6 @@ int ocrdma_qp_state_change(struct ocrdma_qp *qp, enum ib_qp_state new_ib_state,
                           enum ib_qp_state *old_ib_state)
 {
        unsigned long flags;
-       int status = 0;
        enum ocrdma_qp_state new_state;
        new_state = get_ocrdma_qp_state(new_ib_state);
 
@@ -2163,7 +2165,7 @@ int ocrdma_qp_state_change(struct ocrdma_qp *qp, enum ib_qp_state new_ib_state,
        qp->state = new_state;
 
        spin_unlock_irqrestore(&qp->q_lock, flags);
-       return status;
+       return 0;
 }
 
 static u32 ocrdma_set_create_qp_mbx_access_flags(struct ocrdma_qp *qp)
@@ -2501,7 +2503,12 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
        union ib_gid sgid, zgid;
        struct ib_gid_attr sgid_attr;
        u32 vlan_id = 0xFFFF;
-       u8 mac_addr[6];
+       u8 mac_addr[6], hdr_type;
+       union {
+               struct sockaddr     _sockaddr;
+               struct sockaddr_in  _sockaddr_in;
+               struct sockaddr_in6 _sockaddr_in6;
+       } sgid_addr, dgid_addr;
        struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
 
        if ((ah_attr->ah_flags & IB_AH_GRH) == 0)
@@ -2516,6 +2523,8 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
        cmd->params.hop_lmt_rq_psn |=
            (ah_attr->grh.hop_limit << OCRDMA_QP_PARAMS_HOP_LMT_SHIFT);
        cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID;
+
+       /* GIDs */
        memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0],
               sizeof(cmd->params.dgid));
 
@@ -2538,6 +2547,16 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
                return status;
        cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) |
                                (mac_addr[2] << 16) | (mac_addr[3] << 24);
+
+       hdr_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
+       if (hdr_type == RDMA_NETWORK_IPV4) {
+               rdma_gid2ip(&sgid_addr._sockaddr, &sgid);
+               rdma_gid2ip(&dgid_addr._sockaddr, &ah_attr->grh.dgid);
+               memcpy(&cmd->params.dgid[0],
+                      &dgid_addr._sockaddr_in.sin_addr.s_addr, 4);
+               memcpy(&cmd->params.sgid[0],
+                      &sgid_addr._sockaddr_in.sin_addr.s_addr, 4);
+       }
        /* convert them to LE format. */
        ocrdma_cpu_to_le32(&cmd->params.dgid[0], sizeof(cmd->params.dgid));
        ocrdma_cpu_to_le32(&cmd->params.sgid[0], sizeof(cmd->params.sgid));
@@ -2558,7 +2577,9 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
                cmd->params.rnt_rc_sl_fl |=
                        (dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT;
        }
-
+       cmd->params.max_sge_recv_flags |= ((hdr_type <<
+                                       OCRDMA_QP_PARAMS_FLAGS_L3_TYPE_SHIFT) &
+                                       OCRDMA_QP_PARAMS_FLAGS_L3_TYPE_MASK);
        return 0;
 }
 
@@ -2871,7 +2892,7 @@ int ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq)
 static int ocrdma_mbx_get_dcbx_config(struct ocrdma_dev *dev, u32 ptype,
                                      struct ocrdma_dcbx_cfg *dcbxcfg)
 {
-       int status = 0;
+       int status;
        dma_addr_t pa;
        struct ocrdma_mqe cmd;
 
index f38743018cb454bca0e42440ebd27071e16a1ffe..3d75f65ce87e207bf958b44fe3beb98f7666f6ea 100644 (file)
@@ -89,8 +89,10 @@ static int ocrdma_port_immutable(struct ib_device *ibdev, u8 port_num,
                                 struct ib_port_immutable *immutable)
 {
        struct ib_port_attr attr;
+       struct ocrdma_dev *dev;
        int err;
 
+       dev = get_ocrdma_dev(ibdev);
        err = ocrdma_query_port(ibdev, port_num, &attr);
        if (err)
                return err;
@@ -98,6 +100,8 @@ static int ocrdma_port_immutable(struct ib_device *ibdev, u8 port_num,
        immutable->pkey_tbl_len = attr.pkey_tbl_len;
        immutable->gid_tbl_len = attr.gid_tbl_len;
        immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
+       if (ocrdma_is_udp_encap_supported(dev))
+               immutable->core_cap_flags |= RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP;
        immutable->max_mad_size = IB_MGMT_MAD_SIZE;
 
        return 0;
index 99dd6fdf06d7b44bcea2f0fb28b90edf54aefcc0..0efc9662c6d8758df6b269a26acb2c4f51e2b23e 100644
@@ -140,7 +140,11 @@ enum {
        OCRDMA_DB_RQ_SHIFT              = 24
 };
 
-#define OCRDMA_ROUDP_FLAGS_SHIFT       0x03
+enum {
+       OCRDMA_L3_TYPE_IB_GRH   = 0x00,
+       OCRDMA_L3_TYPE_IPV4     = 0x01,
+       OCRDMA_L3_TYPE_IPV6     = 0x02
+};
 
 #define OCRDMA_DB_CQ_RING_ID_MASK       0x3FF  /* bits 0 - 9 */
 #define OCRDMA_DB_CQ_RING_ID_EXT_MASK  0x0C00  /* bits 10-11 of qid at 12-11 */
@@ -546,7 +550,8 @@ enum {
        OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT         = 8,
        OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_MASK          = 0xFF <<
                                OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT,
-
+       OCRDMA_MBX_QUERY_CFG_L3_TYPE_SHIFT              = 3,
+       OCRDMA_MBX_QUERY_CFG_L3_TYPE_MASK               = 0x18,
        OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT         = 0,
        OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK          = 0xFFFF,
        OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT        = 16,
@@ -1107,6 +1112,8 @@ enum {
        OCRDMA_QP_PARAMS_STATE_MASK             = BIT(5) | BIT(6) | BIT(7),
        OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC        = BIT(8),
        OCRDMA_QP_PARAMS_FLAGS_INB_ATEN         = BIT(9),
+       OCRDMA_QP_PARAMS_FLAGS_L3_TYPE_SHIFT    = 11,
+       OCRDMA_QP_PARAMS_FLAGS_L3_TYPE_MASK     = BIT(11) | BIT(12) | BIT(13),
        OCRDMA_QP_PARAMS_MAX_SGE_RECV_SHIFT     = 16,
        OCRDMA_QP_PARAMS_MAX_SGE_RECV_MASK      = 0xFFFF <<
                                        OCRDMA_QP_PARAMS_MAX_SGE_RECV_SHIFT,
@@ -1735,8 +1742,11 @@ enum {
 
        /* w1 */
        OCRDMA_CQE_UD_XFER_LEN_SHIFT    = 16,
+       OCRDMA_CQE_UD_XFER_LEN_MASK     = 0x1FFF,
        OCRDMA_CQE_PKEY_SHIFT           = 0,
        OCRDMA_CQE_PKEY_MASK            = 0xFFFF,
+       OCRDMA_CQE_UD_L3TYPE_SHIFT      = 29,
+       OCRDMA_CQE_UD_L3TYPE_MASK       = 0x07,
 
        /* w2 */
        OCRDMA_CQE_QPN_SHIFT            = 0,
@@ -1861,7 +1871,7 @@ struct ocrdma_ewqe_ud_hdr {
        u32 rsvd_dest_qpn;
        u32 qkey;
        u32 rsvd_ahid;
-       u32 rsvd;
+       u32 hdr_type;
 };
 
 /* extended wqe followed by hdr_wqe for Fast Memory register */
index 255f774080a4aae08952df0374cdcb5a4063c8d1..8bef09a8c49ff3195ac8c9df9dd01a100a93c6ac 100644
@@ -610,7 +610,7 @@ static char *ocrdma_driver_dbg_stats(struct ocrdma_dev *dev)
 static void ocrdma_update_stats(struct ocrdma_dev *dev)
 {
        ulong now = jiffies, secs;
-       int status = 0;
+       int status;
        struct ocrdma_rdma_stats_resp *rdma_stats =
                      (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
        struct ocrdma_rsrc_stats *rsrc_stats = &rdma_stats->act_rsrc_stats;
@@ -641,7 +641,7 @@ static ssize_t ocrdma_dbgfs_ops_write(struct file *filp,
 {
        char tmp_str[32];
        long reset;
-       int status = 0;
+       int status;
        struct ocrdma_stats *pstats = filp->private_data;
        struct ocrdma_dev *dev = pstats->dev;
 
index 12420e4ecf3da09d552ac117adcd85049ccd891b..a8496a18e20d667614cf7b2c71408cc046c22b3d 100644
@@ -419,7 +419,7 @@ static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
                                          struct ib_udata *udata)
 {
        struct ocrdma_pd *pd = NULL;
-       int status = 0;
+       int status;
 
        pd = kzalloc(sizeof(*pd), GFP_KERNEL);
        if (!pd)
@@ -468,7 +468,7 @@ static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx,
 static int _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
                              struct ocrdma_pd *pd)
 {
-       int status = 0;
+       int status;
 
        if (dev->pd_mgr->pd_prealloc_valid)
                status = ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled);
@@ -596,7 +596,7 @@ map_err:
 
 int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
 {
-       int status = 0;
+       int status;
        struct ocrdma_mm *mm, *tmp;
        struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);
        struct ocrdma_dev *dev = get_ocrdma_dev(ibctx->device);
@@ -623,7 +623,7 @@ int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
        unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
        u64 unmapped_db = (u64) dev->nic_info.unmapped_db;
        unsigned long len = (vma->vm_end - vma->vm_start);
-       int status = 0;
+       int status;
        bool found;
 
        if (vma->vm_start & (PAGE_SIZE - 1))
@@ -1285,7 +1285,7 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
                                struct ib_udata *udata, int dpp_offset,
                                int dpp_credit_lmt, int srq)
 {
-       int status = 0;
+       int status;
        u64 usr_db;
        struct ocrdma_create_qp_uresp uresp;
        struct ocrdma_pd *pd = qp->pd;
@@ -1494,9 +1494,7 @@ int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
         */
        if (status < 0)
                return status;
-       status = ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask);
-
-       return status;
+       return ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask);
 }
 
 int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
@@ -1949,7 +1947,7 @@ int ocrdma_modify_srq(struct ib_srq *ibsrq,
                      enum ib_srq_attr_mask srq_attr_mask,
                      struct ib_udata *udata)
 {
-       int status = 0;
+       int status;
        struct ocrdma_srq *srq;
 
        srq = get_ocrdma_srq(ibsrq);
@@ -2005,6 +2003,7 @@ static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
        else
                ud_hdr->qkey = ud_wr(wr)->remote_qkey;
        ud_hdr->rsvd_ahid = ah->id;
+       ud_hdr->hdr_type = ah->hdr_type;
        if (ah->av->valid & OCRDMA_AV_VLAN_VALID)
                hdr->cw |= (OCRDMA_FLAG_AH_VLAN_PR << OCRDMA_WQE_FLAGS_SHIFT);
 }
@@ -2717,9 +2716,11 @@ static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
        return expand;
 }
 
-static int ocrdma_update_ud_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe)
+static int ocrdma_update_ud_rcqe(struct ocrdma_dev *dev, struct ib_wc *ibwc,
+                                struct ocrdma_cqe *cqe)
 {
        int status;
+       u16 hdr_type = 0;
 
        status = (le32_to_cpu(cqe->flags_status_srcqpn) &
                OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT;
@@ -2728,7 +2729,17 @@ static int ocrdma_update_ud_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe)
        ibwc->pkey_index = 0;
        ibwc->wc_flags = IB_WC_GRH;
        ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
-                                       OCRDMA_CQE_UD_XFER_LEN_SHIFT);
+                         OCRDMA_CQE_UD_XFER_LEN_SHIFT) &
+                         OCRDMA_CQE_UD_XFER_LEN_MASK;
+
+       if (ocrdma_is_udp_encap_supported(dev)) {
+               hdr_type = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
+                           OCRDMA_CQE_UD_L3TYPE_SHIFT) &
+                           OCRDMA_CQE_UD_L3TYPE_MASK;
+               ibwc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
+               ibwc->network_hdr_type = hdr_type;
+       }
+
        return status;
 }
 
@@ -2791,12 +2802,15 @@ static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
 static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp,
                                     struct ocrdma_cqe *cqe, struct ib_wc *ibwc)
 {
+       struct ocrdma_dev *dev;
+
+       dev = get_ocrdma_dev(qp->ibqp.device);
        ibwc->opcode = IB_WC_RECV;
        ibwc->qp = &qp->ibqp;
        ibwc->status = IB_WC_SUCCESS;
 
        if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
-               ocrdma_update_ud_rcqe(ibwc, cqe);
+               ocrdma_update_ud_rcqe(dev, ibwc, cqe);
        else
                ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen);
 
index a6f3eab0f350ef036cfb44a47fbbafe5f9bd1fc1..caec8e9c46669bf692d34a31735b86c446da1ff0 100644
@@ -244,6 +244,7 @@ struct ipoib_cm_tx {
        unsigned             tx_tail;
        unsigned long        flags;
        u32                  mtu;
+       unsigned             max_send_sge;
 };
 
 struct ipoib_cm_rx_buf {
@@ -387,9 +388,10 @@ struct ipoib_dev_priv {
        struct dentry *mcg_dentry;
        struct dentry *path_dentry;
 #endif
-       int     hca_caps;
+       u64     hca_caps;
        struct ipoib_ethtool_st ethtool;
        struct timer_list poll_timer;
+       unsigned max_send_sge;
 };
 
 struct ipoib_ah {
index 917e46ea3bf681681a4abba5e9ce6e86514d07eb..c8ed53562c9b54cfc3fd21ece14ea6b95f94c872 100644
@@ -710,6 +710,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_tx_buf *tx_req;
        int rc;
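+       /* The skb linear (head) data, if any, consumes one send SGE */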
+       unsigned usable_sge = tx->max_send_sge - !!skb_headlen(skb);
 
        if (unlikely(skb->len > tx->mtu)) {
                ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
@@ -719,7 +720,23 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
                ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
                return;
        }
-
+       if (skb_shinfo(skb)->nr_frags > usable_sge) {
+               if (skb_linearize(skb) < 0) {
+                       ipoib_warn(priv, "skb could not be linearized\n");
+                       ++dev->stats.tx_dropped;
+                       ++dev->stats.tx_errors;
+                       dev_kfree_skb_any(skb);
+                       return;
+               }
+               /* Does skb_linearize return ok without reducing nr_frags? */
+               if (skb_shinfo(skb)->nr_frags > usable_sge) {
+                       ipoib_warn(priv, "too many frags after skb linearize\n");
+                       ++dev->stats.tx_dropped;
+                       ++dev->stats.tx_errors;
+                       dev_kfree_skb_any(skb);
+                       return;
+               }
+       }
        ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n",
                       tx->tx_head, skb->len, tx->qp->qp_num);
 
@@ -1031,7 +1048,8 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
        struct ib_qp *tx_qp;
 
        if (dev->features & NETIF_F_SG)
-               attr.cap.max_send_sge = MAX_SKB_FRAGS + 1;
+               attr.cap.max_send_sge =
+                       min_t(u32, priv->ca->attrs.max_sge, MAX_SKB_FRAGS + 1);
 
        tx_qp = ib_create_qp(priv->pd, &attr);
        if (PTR_ERR(tx_qp) == -EINVAL) {
@@ -1040,6 +1058,7 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
                attr.create_flags &= ~IB_QP_CREATE_USE_GFP_NOIO;
                tx_qp = ib_create_qp(priv->pd, &attr);
        }
+       tx->max_send_sge = attr.cap.max_send_sge;
        return tx_qp;
 }
 
index fa9c42ff1fb00963a47a71868ff96abb6e15189b..f0e55e47eb540c0c5fbf95e5ee00d38df14b9e97 100644
@@ -180,6 +180,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
        struct sk_buff *skb;
        u64 mapping[IPOIB_UD_RX_SG];
        union ib_gid *dgid;
+       union ib_gid *sgid;
 
        ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
                       wr_id, wc->status);
@@ -203,13 +204,6 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
                return;
        }
 
-       /*
-        * Drop packets that this interface sent, ie multicast packets
-        * that the HCA has replicated.
-        */
-       if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num)
-               goto repost;
-
        memcpy(mapping, priv->rx_ring[wr_id].mapping,
               IPOIB_UD_RX_SG * sizeof *mapping);
 
@@ -239,6 +233,25 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
        else
                skb->pkt_type = PACKET_MULTICAST;
 
+       sgid = &((struct ib_grh *)skb->data)->sgid;
+
+       /*
+        * Drop packets that this interface sent, ie multicast packets
+        * that the HCA has replicated.
+        */
+       if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num) {
+               int need_repost = 1;
+
+               if ((wc->wc_flags & IB_WC_GRH) &&
+                   sgid->global.interface_id != priv->local_gid.global.interface_id)
+                       need_repost = 0;
+
+               if (need_repost) {
+                       dev_kfree_skb_any(skb);
+                       goto repost;
+               }
+       }
+
        skb_pull(skb, IB_GRH_BYTES);
 
        skb->protocol = ((struct ipoib_header *) skb->data)->proto;
@@ -538,6 +551,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
        struct ipoib_tx_buf *tx_req;
        int hlen, rc;
        void *phead;
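+       /* Account for the send SGE consumed by the skb head, if present */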
+       unsigned usable_sge = priv->max_send_sge - !!skb_headlen(skb);
 
        if (skb_is_gso(skb)) {
                hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
@@ -561,6 +575,23 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
                phead = NULL;
                hlen  = 0;
        }
+       if (skb_shinfo(skb)->nr_frags > usable_sge) {
+               if (skb_linearize(skb) < 0) {
+                       ipoib_warn(priv, "skb could not be linearized\n");
+                       ++dev->stats.tx_dropped;
+                       ++dev->stats.tx_errors;
+                       dev_kfree_skb_any(skb);
+                       return;
+               }
+               /* Does skb_linearize return ok without reducing nr_frags? */
+               if (skb_shinfo(skb)->nr_frags > usable_sge) {
+                       ipoib_warn(priv, "too many frags after skb linearize\n");
+                       ++dev->stats.tx_dropped;
+                       ++dev->stats.tx_errors;
+                       dev_kfree_skb_any(skb);
+                       return;
+               }
+       }
 
        ipoib_dbg_data(priv, "sending packet, length=%d address=%p qpn=0x%06x\n",
                       skb->len, address, qpn);
index 25509bbd4a050a076dba5d8bd88b718fa1a72778..80807d6e5c4cff878f25eaf0861c9c8822843676 100644
@@ -51,6 +51,7 @@
 #include <net/addrconf.h>
 #include <linux/inetdevice.h>
 #include <rdma/ib_cache.h>
+#include <linux/pci.h>
 
 #define DRV_VERSION "1.0.0"
 
@@ -1590,11 +1591,67 @@ void ipoib_dev_cleanup(struct net_device *dev)
        priv->tx_ring = NULL;
 }
 
+static int ipoib_set_vf_link_state(struct net_device *dev, int vf, int link_state)
+{
+       struct ipoib_dev_priv *priv = netdev_priv(dev);
+
+       return ib_set_vf_link_state(priv->ca, vf, priv->port, link_state);
+}
+
+static int ipoib_get_vf_config(struct net_device *dev, int vf,
+                              struct ifla_vf_info *ivf)
+{
+       struct ipoib_dev_priv *priv = netdev_priv(dev);
+       int err;
+
+       err = ib_get_vf_config(priv->ca, vf, priv->port, ivf);
+       if (err)
+               return err;
+
+       ivf->vf = vf;
+
+       return 0;
+}
+
+static int ipoib_set_vf_guid(struct net_device *dev, int vf, u64 guid, int type)
+{
+       struct ipoib_dev_priv *priv = netdev_priv(dev);
+
+       if (type != IFLA_VF_IB_NODE_GUID && type != IFLA_VF_IB_PORT_GUID)
+               return -EINVAL;
+
+       return ib_set_vf_guid(priv->ca, vf, priv->port, guid, type);
+}
+
+static int ipoib_get_vf_stats(struct net_device *dev, int vf,
+                             struct ifla_vf_stats *vf_stats)
+{
+       struct ipoib_dev_priv *priv = netdev_priv(dev);
+
+       return ib_get_vf_stats(priv->ca, vf, priv->port, vf_stats);
+}
+
 static const struct header_ops ipoib_header_ops = {
        .create = ipoib_hard_header,
 };
 
-static const struct net_device_ops ipoib_netdev_ops = {
+static const struct net_device_ops ipoib_netdev_ops_pf = {
+       .ndo_uninit              = ipoib_uninit,
+       .ndo_open                = ipoib_open,
+       .ndo_stop                = ipoib_stop,
+       .ndo_change_mtu          = ipoib_change_mtu,
+       .ndo_fix_features        = ipoib_fix_features,
+       .ndo_start_xmit          = ipoib_start_xmit,
+       .ndo_tx_timeout          = ipoib_timeout,
+       .ndo_set_rx_mode         = ipoib_set_mcast_list,
+       .ndo_get_iflink          = ipoib_get_iflink,
+       .ndo_set_vf_link_state   = ipoib_set_vf_link_state,
+       .ndo_get_vf_config       = ipoib_get_vf_config,
+       .ndo_get_vf_stats        = ipoib_get_vf_stats,
+       .ndo_set_vf_guid         = ipoib_set_vf_guid,
+};
+
+static const struct net_device_ops ipoib_netdev_ops_vf = {
        .ndo_uninit              = ipoib_uninit,
        .ndo_open                = ipoib_open,
        .ndo_stop                = ipoib_stop,
@@ -1610,7 +1667,11 @@ void ipoib_setup(struct net_device *dev)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
 
-       dev->netdev_ops          = &ipoib_netdev_ops;
+       if (priv->hca_caps & IB_DEVICE_VIRTUAL_FUNCTION)
+               dev->netdev_ops = &ipoib_netdev_ops_vf;
+       else
+               dev->netdev_ops = &ipoib_netdev_ops_pf;
+
        dev->header_ops          = &ipoib_header_ops;
 
        ipoib_set_ethtool_ops(dev);
index d48c5bae78774663c17e72ed1e4c87475e1005ad..b809c373e40e54598a39bf555571ff80eba25277 100644
@@ -206,7 +206,8 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
                init_attr.create_flags |= IB_QP_CREATE_NETIF_QP;
 
        if (dev->features & NETIF_F_SG)
-               init_attr.cap.max_send_sge = MAX_SKB_FRAGS + 1;
+               init_attr.cap.max_send_sge =
+                       min_t(u32, priv->ca->attrs.max_sge, MAX_SKB_FRAGS + 1);
 
        priv->qp = ib_create_qp(priv->pd, &init_attr);
        if (IS_ERR(priv->qp)) {
@@ -233,6 +234,8 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
        priv->rx_wr.next = NULL;
        priv->rx_wr.sg_list = priv->rx_sge;
 
+       priv->max_send_sge = init_attr.cap.max_send_sge;
+
        return 0;
 
 out_free_send_cq:
index c827c93f46c547ce7ec810a2636723a7f3b582a8..80b6bedc172f32a1d9dfad1e36ee8aa3a4a16240 100644
@@ -969,7 +969,16 @@ static umode_t iser_attr_is_visible(int param_type, int param)
 
 static int iscsi_iser_slave_alloc(struct scsi_device *sdev)
 {
-       blk_queue_virt_boundary(sdev->request_queue, ~MASK_4K);
+       struct iscsi_session *session;
+       struct iser_conn *iser_conn;
+       struct ib_device *ib_dev;
+
+       session = starget_to_session(scsi_target(sdev))->dd_data;
+       iser_conn = session->leadconn->dd_data;
+       ib_dev = iser_conn->ib_conn.device->ib_device;
+
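+       /* Devices that can register gapped SG lists do not need the 4K virt boundary */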
+       if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
+               blk_queue_virt_boundary(sdev->request_queue, ~MASK_4K);
 
        return 0;
 }
index 95f0a64e076b9addc5b01fc17e268f211c9e6f04..0351059783b1297c133a263a44bb137e317b2566 100644
@@ -458,9 +458,6 @@ struct iser_fr_pool {
  * @comp:                iser completion context
  * @fr_pool:             connection fast registration pool
  * @pi_support:          Indicate device T10-PI support
- * @last:                last send wr to signal all flush errors were drained
- * @last_cqe:            cqe handler for last wr
- * @last_comp:           completes when all connection completions consumed
  */
 struct ib_conn {
        struct rdma_cm_id           *cma_id;
@@ -472,10 +469,7 @@ struct ib_conn {
        struct iser_comp            *comp;
        struct iser_fr_pool          fr_pool;
        bool                         pi_support;
-       struct ib_send_wr            last;
-       struct ib_cqe                last_cqe;
        struct ib_cqe                reg_cqe;
-       struct completion            last_comp;
 };
 
 /**
@@ -617,7 +611,6 @@ void iser_cmd_comp(struct ib_cq *cq, struct ib_wc *wc);
 void iser_ctrl_comp(struct ib_cq *cq, struct ib_wc *wc);
 void iser_dataout_comp(struct ib_cq *cq, struct ib_wc *wc);
 void iser_reg_comp(struct ib_cq *cq, struct ib_wc *wc);
-void iser_last_comp(struct ib_cq *cq, struct ib_wc *wc);
 
 void iser_task_rdma_init(struct iscsi_iser_task *task);
 
index ed54b388e7adca899bfaec181c2f4c2ba75b1f47..81ae2e30dd12540cdfeb41538b90676c81ee590c 100644
@@ -729,13 +729,6 @@ void iser_dataout_comp(struct ib_cq *cq, struct ib_wc *wc)
        kmem_cache_free(ig.desc_cache, desc);
 }
 
-void iser_last_comp(struct ib_cq *cq, struct ib_wc *wc)
-{
-       struct ib_conn *ib_conn = wc->qp->qp_context;
-
-       complete(&ib_conn->last_comp);
-}
-
 void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
 
 {
index 40c0f4978e2f00b5173001f54ec41a4bc651b839..1b4945367e4f6d93bcf6e3612a1450e84f9f5eac 100644
@@ -252,14 +252,21 @@ void iser_free_fmr_pool(struct ib_conn *ib_conn)
 }
 
 static int
-iser_alloc_reg_res(struct ib_device *ib_device,
+iser_alloc_reg_res(struct iser_device *device,
                   struct ib_pd *pd,
                   struct iser_reg_resources *res,
                   unsigned int size)
 {
+       struct ib_device *ib_dev = device->ib_device;
+       enum ib_mr_type mr_type;
        int ret;
 
-       res->mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, size);
+       if (ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
+               mr_type = IB_MR_TYPE_SG_GAPS;
+       else
+               mr_type = IB_MR_TYPE_MEM_REG;
+
+       res->mr = ib_alloc_mr(pd, mr_type, size);
        if (IS_ERR(res->mr)) {
                ret = PTR_ERR(res->mr);
                iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret);
@@ -277,7 +284,7 @@ iser_free_reg_res(struct iser_reg_resources *rsc)
 }
 
 static int
-iser_alloc_pi_ctx(struct ib_device *ib_device,
+iser_alloc_pi_ctx(struct iser_device *device,
                  struct ib_pd *pd,
                  struct iser_fr_desc *desc,
                  unsigned int size)
@@ -291,7 +298,7 @@ iser_alloc_pi_ctx(struct ib_device *ib_device,
 
        pi_ctx = desc->pi_ctx;
 
-       ret = iser_alloc_reg_res(ib_device, pd, &pi_ctx->rsc, size);
+       ret = iser_alloc_reg_res(device, pd, &pi_ctx->rsc, size);
        if (ret) {
                iser_err("failed to allocate reg_resources\n");
                goto alloc_reg_res_err;
@@ -324,7 +331,7 @@ iser_free_pi_ctx(struct iser_pi_context *pi_ctx)
 }
 
 static struct iser_fr_desc *
-iser_create_fastreg_desc(struct ib_device *ib_device,
+iser_create_fastreg_desc(struct iser_device *device,
                         struct ib_pd *pd,
                         bool pi_enable,
                         unsigned int size)
@@ -336,12 +343,12 @@ iser_create_fastreg_desc(struct ib_device *ib_device,
        if (!desc)
                return ERR_PTR(-ENOMEM);
 
-       ret = iser_alloc_reg_res(ib_device, pd, &desc->rsc, size);
+       ret = iser_alloc_reg_res(device, pd, &desc->rsc, size);
        if (ret)
                goto reg_res_alloc_failure;
 
        if (pi_enable) {
-               ret = iser_alloc_pi_ctx(ib_device, pd, desc, size);
+               ret = iser_alloc_pi_ctx(device, pd, desc, size);
                if (ret)
                        goto pi_ctx_alloc_failure;
        }
@@ -374,7 +381,7 @@ int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
        spin_lock_init(&fr_pool->lock);
        fr_pool->size = 0;
        for (i = 0; i < cmds_max; i++) {
-               desc = iser_create_fastreg_desc(device->ib_device, device->pd,
+               desc = iser_create_fastreg_desc(device, device->pd,
                                                ib_conn->pi_support, size);
                if (IS_ERR(desc)) {
                        ret = PTR_ERR(desc);
@@ -663,7 +670,6 @@ void iser_conn_release(struct iser_conn *iser_conn)
 int iser_conn_terminate(struct iser_conn *iser_conn)
 {
        struct ib_conn *ib_conn = &iser_conn->ib_conn;
-       struct ib_send_wr *bad_wr;
        int err = 0;
 
        /* terminate the iser conn only if the conn state is UP */
@@ -688,14 +694,8 @@ int iser_conn_terminate(struct iser_conn *iser_conn)
                        iser_err("Failed to disconnect, conn: 0x%p err %d\n",
                                 iser_conn, err);
 
-               /* post an indication that all flush errors were consumed */
-               err = ib_post_send(ib_conn->qp, &ib_conn->last, &bad_wr);
-               if (err) {
-                       iser_err("conn %p failed to post last wr", ib_conn);
-                       return 1;
-               }
-
-               wait_for_completion(&ib_conn->last_comp);
+               /* block until all flush errors are consumed */
+               ib_drain_sq(ib_conn->qp);
        }
 
        return 1;
@@ -954,10 +954,6 @@ void iser_conn_init(struct iser_conn *iser_conn)
 
        ib_conn->post_recv_buf_count = 0;
        ib_conn->reg_cqe.done = iser_reg_comp;
-       ib_conn->last_cqe.done = iser_last_comp;
-       ib_conn->last.wr_cqe = &ib_conn->last_cqe;
-       ib_conn->last.opcode = IB_WR_SEND;
-       init_completion(&ib_conn->last_comp);
 }
 
  /**
index 03022f6420d770a469d818ee07fb7a4a4babda3b..b6bf2049602107befe1f49eedee4494336f85e28 100644
@@ -446,49 +446,17 @@ static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
                                  dev->max_pages_per_mr);
 }
 
-static void srp_drain_done(struct ib_cq *cq, struct ib_wc *wc)
-{
-       struct srp_rdma_ch *ch = cq->cq_context;
-
-       complete(&ch->done);
-}
-
-static struct ib_cqe srp_drain_cqe = {
-       .done           = srp_drain_done,
-};
-
 /**
  * srp_destroy_qp() - destroy an RDMA queue pair
  * @ch: SRP RDMA channel.
  *
- * Change a queue pair into the error state and wait until all receive
- * completions have been processed before destroying it. This avoids that
- * the receive completion handler can access the queue pair while it is
+ * Drain the qp before destroying it.  This prevents the receive
+ * completion handler from accessing the queue pair while it is
  * being destroyed.
  */
 static void srp_destroy_qp(struct srp_rdma_ch *ch)
 {
-       static struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
-       static struct ib_recv_wr wr = { 0 };
-       struct ib_recv_wr *bad_wr;
-       int ret;
-
-       wr.wr_cqe = &srp_drain_cqe;
-       /* Destroying a QP and reusing ch->done is only safe if not connected */
-       WARN_ON_ONCE(ch->connected);
-
-       ret = ib_modify_qp(ch->qp, &attr, IB_QP_STATE);
-       WARN_ONCE(ret, "ib_cm_init_qp_attr() returned %d\n", ret);
-       if (ret)
-               goto out;
-
-       init_completion(&ch->done);
-       ret = ib_post_recv(ch->qp, &wr, &bad_wr);
-       WARN_ONCE(ret, "ib_post_recv() returned %d\n", ret);
-       if (ret == 0)
-               wait_for_completion(&ch->done);
-
-out:
+       ib_drain_rq(ch->qp);
        ib_destroy_qp(ch->qp);
 }
 
@@ -508,7 +476,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
        if (!init_attr)
                return -ENOMEM;
 
-       /* queue_size + 1 for ib_drain_qp */
+       /* queue_size + 1 for ib_drain_rq() */
        recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
                                ch->comp_vector, IB_POLL_SOFTIRQ);
        if (IS_ERR(recv_cq)) {
index 0c37fee363b1d60eb3dd63ebfdc6330b2e14b562..578c3703421ddb18a5bad7c64bde26c83d546964 100644
@@ -91,76 +91,32 @@ MODULE_PARM_DESC(srpt_service_guid,
                 " instead of using the node_guid of the first HCA.");
 
 static struct ib_client srpt_client;
-static void srpt_release_channel(struct srpt_rdma_ch *ch);
+static void srpt_release_cmd(struct se_cmd *se_cmd);
+static void srpt_free_ch(struct kref *kref);
 static int srpt_queue_status(struct se_cmd *cmd);
 static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc);
 static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc);
+static void srpt_process_wait_list(struct srpt_rdma_ch *ch);
 
-/**
- * opposite_dma_dir() - Swap DMA_TO_DEVICE and DMA_FROM_DEVICE.
- */
-static inline
-enum dma_data_direction opposite_dma_dir(enum dma_data_direction dir)
-{
-       switch (dir) {
-       case DMA_TO_DEVICE:     return DMA_FROM_DEVICE;
-       case DMA_FROM_DEVICE:   return DMA_TO_DEVICE;
-       default:                return dir;
-       }
-}
-
-/**
- * srpt_sdev_name() - Return the name associated with the HCA.
- *
- * Examples are ib0, ib1, ...
- */
-static inline const char *srpt_sdev_name(struct srpt_device *sdev)
-{
-       return sdev->device->name;
-}
-
-static enum rdma_ch_state srpt_get_ch_state(struct srpt_rdma_ch *ch)
-{
-       unsigned long flags;
-       enum rdma_ch_state state;
-
-       spin_lock_irqsave(&ch->spinlock, flags);
-       state = ch->state;
-       spin_unlock_irqrestore(&ch->spinlock, flags);
-       return state;
-}
-
-static enum rdma_ch_state
-srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new_state)
-{
-       unsigned long flags;
-       enum rdma_ch_state prev;
-
-       spin_lock_irqsave(&ch->spinlock, flags);
-       prev = ch->state;
-       ch->state = new_state;
-       spin_unlock_irqrestore(&ch->spinlock, flags);
-       return prev;
-}
-
-/**
- * srpt_test_and_set_ch_state() - Test and set the channel state.
- *
- * Returns true if and only if the channel state has been set to the new state.
+/*
+ * The only allowed channel state changes are those that change the channel
+ * state into a state with a higher numerical value. Hence the new > prev test.
  */
-static bool
-srpt_test_and_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state old,
-                          enum rdma_ch_state new)
+static bool srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new)
 {
        unsigned long flags;
        enum rdma_ch_state prev;
+       bool changed = false;
 
        spin_lock_irqsave(&ch->spinlock, flags);
        prev = ch->state;
-       if (prev == old)
+       if (new > prev) {
                ch->state = new;
+               changed = true;
+       }
        spin_unlock_irqrestore(&ch->spinlock, flags);
-       return prev == old;
+
+       return changed;
 }
 
 /**
@@ -182,7 +138,7 @@ static void srpt_event_handler(struct ib_event_handler *handler,
                return;
 
        pr_debug("ASYNC event= %d on device= %s\n", event->event,
-                srpt_sdev_name(sdev));
+                sdev->device->name);
 
        switch (event->event) {
        case IB_EVENT_PORT_ERR:
@@ -220,25 +176,39 @@ static void srpt_srq_event(struct ib_event *event, void *ctx)
        pr_info("SRQ event %d\n", event->event);
 }
 
+static const char *get_ch_state_name(enum rdma_ch_state s)
+{
+       switch (s) {
+       case CH_CONNECTING:
+               return "connecting";
+       case CH_LIVE:
+               return "live";
+       case CH_DISCONNECTING:
+               return "disconnecting";
+       case CH_DRAINING:
+               return "draining";
+       case CH_DISCONNECTED:
+               return "disconnected";
+       }
+       return "???";
+}
+
 /**
  * srpt_qp_event() - QP event callback function.
  */
 static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
 {
        pr_debug("QP event %d on cm_id=%p sess_name=%s state=%d\n",
-                event->event, ch->cm_id, ch->sess_name, srpt_get_ch_state(ch));
+                event->event, ch->cm_id, ch->sess_name, ch->state);
 
        switch (event->event) {
        case IB_EVENT_COMM_EST:
                ib_cm_notify(ch->cm_id, event->event);
                break;
        case IB_EVENT_QP_LAST_WQE_REACHED:
-               if (srpt_test_and_set_ch_state(ch, CH_DRAINING,
-                                              CH_RELEASING))
-                       srpt_release_channel(ch);
-               else
-                       pr_debug("%s: state %d - ignored LAST_WQE.\n",
-                                ch->sess_name, srpt_get_ch_state(ch));
+               pr_debug("%s-%d, state %s: received Last WQE event.\n",
+                        ch->sess_name, ch->qp->qp_num,
+                        get_ch_state_name(ch->state));
                break;
        default:
                pr_err("received unrecognized IB QP event %d\n", event->event);
@@ -281,7 +251,7 @@ static void srpt_get_class_port_info(struct ib_dm_mad *mad)
        struct ib_class_port_info *cif;
 
        cif = (struct ib_class_port_info *)mad->data;
-       memset(cif, 0, sizeof *cif);
+       memset(cif, 0, sizeof(*cif));
        cif->base_version = 1;
        cif->class_version = 1;
        cif->resp_time_value = 20;
@@ -340,7 +310,7 @@ static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
                return;
        }
 
-       memset(iocp, 0, sizeof *iocp);
+       memset(iocp, 0, sizeof(*iocp));
        strcpy(iocp->id_string, SRPT_ID_STRING);
        iocp->guid = cpu_to_be64(srpt_service_guid);
        iocp->vendor_id = cpu_to_be32(sdev->device->attrs.vendor_id);
@@ -390,7 +360,7 @@ static void srpt_get_svc_entries(u64 ioc_guid,
        }
 
        svc_entries = (struct ib_dm_svc_entries *)mad->data;
-       memset(svc_entries, 0, sizeof *svc_entries);
+       memset(svc_entries, 0, sizeof(*svc_entries));
        svc_entries->service_entries[0].id = cpu_to_be64(ioc_guid);
        snprintf(svc_entries->service_entries[0].name,
                 sizeof(svc_entries->service_entries[0].name),
@@ -484,7 +454,7 @@ static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
        rsp->ah = ah;
 
        dm_mad = rsp->mad;
-       memcpy(dm_mad, mad_wc->recv_buf.mad, sizeof *dm_mad);
+       memcpy(dm_mad, mad_wc->recv_buf.mad, sizeof(*dm_mad));
        dm_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
        dm_mad->mad_hdr.status = 0;
 
@@ -532,7 +502,7 @@ static int srpt_refresh_port(struct srpt_port *sport)
        struct ib_port_attr port_attr;
        int ret;
 
-       memset(&port_modify, 0, sizeof port_modify);
+       memset(&port_modify, 0, sizeof(port_modify));
        port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
        port_modify.clr_port_cap_mask = 0;
 
@@ -553,7 +523,7 @@ static int srpt_refresh_port(struct srpt_port *sport)
                goto err_query_port;
 
        if (!sport->mad_agent) {
-               memset(&reg_req, 0, sizeof reg_req);
+               memset(&reg_req, 0, sizeof(reg_req));
                reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
                reg_req.mgmt_class_version = IB_MGMT_BASE_VERSION;
                set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
@@ -840,6 +810,39 @@ out:
        return ret;
 }
 
+/**
+ * srpt_zerolength_write() - Perform a zero-length RDMA write.
+ *
+ * A quote from the InfiniBand specification: C9-88: For an HCA responder
+ * using Reliable Connection service, for each zero-length RDMA READ or WRITE
+ * request, the R_Key shall not be validated, even if the request includes
+ * Immediate data.
+ */
+static int srpt_zerolength_write(struct srpt_rdma_ch *ch)
+{
+       struct ib_send_wr wr, *bad_wr;
+
+       memset(&wr, 0, sizeof(wr));
+       wr.opcode = IB_WR_RDMA_WRITE;
+       wr.wr_cqe = &ch->zw_cqe;
+       wr.send_flags = IB_SEND_SIGNALED;
+       return ib_post_send(ch->qp, &wr, &bad_wr);
+}
+
+static void srpt_zerolength_write_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+       struct srpt_rdma_ch *ch = cq->cq_context;
+
+       if (wc->status == IB_WC_SUCCESS) {
+               srpt_process_wait_list(ch);
+       } else {
+               if (srpt_set_ch_state(ch, CH_DISCONNECTED))
+                       schedule_work(&ch->release_work);
+               else
+                       WARN_ONCE(1, "%s-%d\n", ch->sess_name, ch->qp->qp_num);
+       }
+}
+
 /**
  * srpt_get_desc_tbl() - Parse the data descriptors of an SRP_CMD request.
  * @ioctx: Pointer to the I/O context associated with the request.
@@ -903,14 +906,14 @@ static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
 
                db = (struct srp_direct_buf *)(srp_cmd->add_data
                                               + add_cdb_offset);
-               memcpy(ioctx->rbufs, db, sizeof *db);
+               memcpy(ioctx->rbufs, db, sizeof(*db));
                *data_len = be32_to_cpu(db->len);
        } else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
                   ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
                idb = (struct srp_indirect_buf *)(srp_cmd->add_data
                                                  + add_cdb_offset);
 
-               ioctx->n_rbuf = be32_to_cpu(idb->table_desc.len) / sizeof *db;
+               ioctx->n_rbuf = be32_to_cpu(idb->table_desc.len) / sizeof(*db);
 
                if (ioctx->n_rbuf >
                    (srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
@@ -929,7 +932,7 @@ static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
                        ioctx->rbufs = &ioctx->single_rbuf;
                else {
                        ioctx->rbufs =
-                               kmalloc(ioctx->n_rbuf * sizeof *db, GFP_ATOMIC);
+                               kmalloc(ioctx->n_rbuf * sizeof(*db), GFP_ATOMIC);
                        if (!ioctx->rbufs) {
                                ioctx->n_rbuf = 0;
                                ret = -ENOMEM;
@@ -938,7 +941,7 @@ static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
                }
 
                db = idb->desc_list;
-               memcpy(ioctx->rbufs, db, ioctx->n_rbuf * sizeof *db);
+               memcpy(ioctx->rbufs, db, ioctx->n_rbuf * sizeof(*db));
                *data_len = be32_to_cpu(idb->len);
        }
 out:
@@ -956,7 +959,7 @@ static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
        struct ib_qp_attr *attr;
        int ret;
 
-       attr = kzalloc(sizeof *attr, GFP_KERNEL);
+       attr = kzalloc(sizeof(*attr), GFP_KERNEL);
        if (!attr)
                return -ENOMEM;
 
@@ -1070,7 +1073,7 @@ static void srpt_unmap_sg_to_ib_sge(struct srpt_rdma_ch *ch,
                dir = ioctx->cmd.data_direction;
                BUG_ON(dir == DMA_NONE);
                ib_dma_unmap_sg(ch->sport->sdev->device, sg, ioctx->sg_cnt,
-                               opposite_dma_dir(dir));
+                               target_reverse_dma_direction(&ioctx->cmd));
                ioctx->mapped_sg_count = 0;
        }
 }
@@ -1107,7 +1110,7 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
        ioctx->sg_cnt = sg_cnt = cmd->t_data_nents;
 
        count = ib_dma_map_sg(ch->sport->sdev->device, sg, sg_cnt,
-                             opposite_dma_dir(dir));
+                             target_reverse_dma_direction(cmd));
        if (unlikely(!count))
                return -EAGAIN;
 
@@ -1313,10 +1316,7 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
 
        /*
         * If the command is in a state where the target core is waiting for
-        * the ib_srpt driver, change the state to the next state. Changing
-        * the state of the command from SRPT_STATE_NEED_DATA to
-        * SRPT_STATE_DATA_IN ensures that srpt_xmit_response() will call this
-        * function a second time.
+        * the ib_srpt driver, change the state to the next state.
         */
 
        spin_lock_irqsave(&ioctx->spinlock, flags);
@@ -1325,25 +1325,17 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
        case SRPT_STATE_NEED_DATA:
                ioctx->state = SRPT_STATE_DATA_IN;
                break;
-       case SRPT_STATE_DATA_IN:
        case SRPT_STATE_CMD_RSP_SENT:
        case SRPT_STATE_MGMT_RSP_SENT:
                ioctx->state = SRPT_STATE_DONE;
                break;
        default:
+               WARN_ONCE(true, "%s: unexpected I/O context state %d\n",
+                         __func__, state);
                break;
        }
        spin_unlock_irqrestore(&ioctx->spinlock, flags);
 
-       if (state == SRPT_STATE_DONE) {
-               struct srpt_rdma_ch *ch = ioctx->ch;
-
-               BUG_ON(ch->sess == NULL);
-
-               target_put_sess_cmd(&ioctx->cmd);
-               goto out;
-       }
-
        pr_debug("Aborting cmd with state %d and tag %lld\n", state,
                 ioctx->cmd.tag);
 
@@ -1351,19 +1343,16 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
        case SRPT_STATE_NEW:
        case SRPT_STATE_DATA_IN:
        case SRPT_STATE_MGMT:
+       case SRPT_STATE_DONE:
                /*
                 * Do nothing - defer abort processing until
                 * srpt_queue_response() is invoked.
                 */
-               WARN_ON(!transport_check_aborted_status(&ioctx->cmd, false));
                break;
        case SRPT_STATE_NEED_DATA:
-               /* DMA_TO_DEVICE (write) - RDMA read error. */
-
-               /* XXX(hch): this is a horrible layering violation.. */
-               spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
-               ioctx->cmd.transport_state &= ~CMD_T_ACTIVE;
-               spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
+               pr_debug("tag %#llx: RDMA read error\n", ioctx->cmd.tag);
+               transport_generic_request_failure(&ioctx->cmd,
+                                       TCM_CHECK_CONDITION_ABORT_CMD);
                break;
        case SRPT_STATE_CMD_RSP_SENT:
                /*
@@ -1371,18 +1360,16 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
                 * not been received in time.
                 */
                srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
-               target_put_sess_cmd(&ioctx->cmd);
+               transport_generic_free_cmd(&ioctx->cmd, 0);
                break;
        case SRPT_STATE_MGMT_RSP_SENT:
-               srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
-               target_put_sess_cmd(&ioctx->cmd);
+               transport_generic_free_cmd(&ioctx->cmd, 0);
                break;
        default:
                WARN(1, "Unexpected command state (%d)", state);
                break;
        }
 
-out:
        return state;
 }
 
@@ -1422,9 +1409,14 @@ static void srpt_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
                container_of(wc->wr_cqe, struct srpt_send_ioctx, rdma_cqe);
 
        if (unlikely(wc->status != IB_WC_SUCCESS)) {
+               /*
+                * Note: if an RDMA write error completion is received, that
+                * means a SEND has also been posted. Defer further
+                * processing of the associated command until the send error
+                * completion has been received.
+                */
                pr_info("RDMA_WRITE for ioctx 0x%p failed with status %d\n",
                        ioctx, wc->status);
-               srpt_abort_cmd(ioctx);
        }
 }
 
@@ -1464,7 +1456,7 @@ static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
        sense_data_len = ioctx->cmd.scsi_sense_length;
        WARN_ON(sense_data_len > sizeof(ioctx->sense_data));
 
-       memset(srp_rsp, 0, sizeof *srp_rsp);
+       memset(srp_rsp, 0, sizeof(*srp_rsp));
        srp_rsp->opcode = SRP_RSP;
        srp_rsp->req_lim_delta =
                cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
@@ -1514,7 +1506,7 @@ static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
 
        srp_rsp = ioctx->ioctx.buf;
        BUG_ON(!srp_rsp);
-       memset(srp_rsp, 0, sizeof *srp_rsp);
+       memset(srp_rsp, 0, sizeof(*srp_rsp));
 
        srp_rsp->opcode = SRP_RSP;
        srp_rsp->req_lim_delta =
@@ -1528,80 +1520,6 @@ static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
        return resp_len;
 }
 
-#define NO_SUCH_LUN ((uint64_t)-1LL)
-
-/*
- * SCSI LUN addressing method. See also SAM-2 and the section about
- * eight byte LUNs.
- */
-enum scsi_lun_addr_method {
-       SCSI_LUN_ADDR_METHOD_PERIPHERAL   = 0,
-       SCSI_LUN_ADDR_METHOD_FLAT         = 1,
-       SCSI_LUN_ADDR_METHOD_LUN          = 2,
-       SCSI_LUN_ADDR_METHOD_EXTENDED_LUN = 3,
-};
-
-/*
- * srpt_unpack_lun() - Convert from network LUN to linear LUN.
- *
- * Convert an 2-byte, 4-byte, 6-byte or 8-byte LUN structure in network byte
- * order (big endian) to a linear LUN. Supports three LUN addressing methods:
- * peripheral, flat and logical unit. See also SAM-2, section 4.9.4 (page 40).
- */
-static uint64_t srpt_unpack_lun(const uint8_t *lun, int len)
-{
-       uint64_t res = NO_SUCH_LUN;
-       int addressing_method;
-
-       if (unlikely(len < 2)) {
-               pr_err("Illegal LUN length %d, expected 2 bytes or more\n",
-                      len);
-               goto out;
-       }
-
-       switch (len) {
-       case 8:
-               if ((*((__be64 *)lun) &
-                    cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
-                       goto out_err;
-               break;
-       case 4:
-               if (*((__be16 *)&lun[2]) != 0)
-                       goto out_err;
-               break;
-       case 6:
-               if (*((__be32 *)&lun[2]) != 0)
-                       goto out_err;
-               break;
-       case 2:
-               break;
-       default:
-               goto out_err;
-       }
-
-       addressing_method = (*lun) >> 6; /* highest two bits of byte 0 */
-       switch (addressing_method) {
-       case SCSI_LUN_ADDR_METHOD_PERIPHERAL:
-       case SCSI_LUN_ADDR_METHOD_FLAT:
-       case SCSI_LUN_ADDR_METHOD_LUN:
-               res = *(lun + 1) | (((*lun) & 0x3f) << 8);
-               break;
-
-       case SCSI_LUN_ADDR_METHOD_EXTENDED_LUN:
-       default:
-               pr_err("Unimplemented LUN addressing method %u\n",
-                      addressing_method);
-               break;
-       }
-
-out:
-       return res;
-
-out_err:
-       pr_err("Support for multi-level LUNs has not yet been implemented\n");
-       goto out;
-}
-
 static int srpt_check_stop_free(struct se_cmd *cmd)
 {
        struct srpt_send_ioctx *ioctx = container_of(cmd,
@@ -1613,16 +1531,14 @@ static int srpt_check_stop_free(struct se_cmd *cmd)
 /**
  * srpt_handle_cmd() - Process SRP_CMD.
  */
-static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
-                          struct srpt_recv_ioctx *recv_ioctx,
-                          struct srpt_send_ioctx *send_ioctx)
+static void srpt_handle_cmd(struct srpt_rdma_ch *ch,
+                           struct srpt_recv_ioctx *recv_ioctx,
+                           struct srpt_send_ioctx *send_ioctx)
 {
        struct se_cmd *cmd;
        struct srp_cmd *srp_cmd;
-       uint64_t unpacked_lun;
        u64 data_len;
        enum dma_data_direction dir;
-       sense_reason_t ret;
        int rc;
 
        BUG_ON(!send_ioctx);
@@ -1650,65 +1566,23 @@ static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
        if (srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len)) {
                pr_err("0x%llx: parsing SRP descriptor table failed.\n",
                       srp_cmd->tag);
-               ret = TCM_INVALID_CDB_FIELD;
-               goto send_sense;
+               goto release_ioctx;
        }
 
-       unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_cmd->lun,
-                                      sizeof(srp_cmd->lun));
        rc = target_submit_cmd(cmd, ch->sess, srp_cmd->cdb,
-                       &send_ioctx->sense_data[0], unpacked_lun, data_len,
-                       TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF);
+                              &send_ioctx->sense_data[0],
+                              scsilun_to_int(&srp_cmd->lun), data_len,
+                              TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF);
        if (rc != 0) {
-               ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-               goto send_sense;
+               pr_debug("target_submit_cmd() returned %d for tag %#llx\n", rc,
+                        srp_cmd->tag);
+               goto release_ioctx;
        }
-       return 0;
-
-send_sense:
-       transport_send_check_condition_and_sense(cmd, ret, 0);
-       return -1;
-}
-
-/**
- * srpt_rx_mgmt_fn_tag() - Process a task management function by tag.
- * @ch: RDMA channel of the task management request.
- * @fn: Task management function to perform.
- * @req_tag: Tag of the SRP task management request.
- * @mgmt_ioctx: I/O context of the task management request.
- *
- * Returns zero if the target core will process the task management
- * request asynchronously.
- *
- * Note: It is assumed that the initiator serializes tag-based task management
- * requests.
- */
-static int srpt_rx_mgmt_fn_tag(struct srpt_send_ioctx *ioctx, u64 tag)
-{
-       struct srpt_device *sdev;
-       struct srpt_rdma_ch *ch;
-       struct srpt_send_ioctx *target;
-       int ret, i;
+       return;
 
-       ret = -EINVAL;
-       ch = ioctx->ch;
-       BUG_ON(!ch);
-       BUG_ON(!ch->sport);
-       sdev = ch->sport->sdev;
-       BUG_ON(!sdev);
-       spin_lock_irq(&sdev->spinlock);
-       for (i = 0; i < ch->rq_size; ++i) {
-               target = ch->ioctx_ring[i];
-               if (target->cmd.se_lun == ioctx->cmd.se_lun &&
-                   target->cmd.tag == tag &&
-                   srpt_get_cmd_state(target) != SRPT_STATE_DONE) {
-                       ret = 0;
-                       /* now let the target core abort &target->cmd; */
-                       break;
-               }
-       }
-       spin_unlock_irq(&sdev->spinlock);
-       return ret;
+release_ioctx:
+       send_ioctx->state = SRPT_STATE_DONE;
+       srpt_release_cmd(cmd);
 }
 
 static int srp_tmr_to_tcm(int fn)
@@ -1744,8 +1618,6 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
        struct srp_tsk_mgmt *srp_tsk;
        struct se_cmd *cmd;
        struct se_session *sess = ch->sess;
-       uint64_t unpacked_lun;
-       uint32_t tag = 0;
        int tcm_tmr;
        int rc;
 
@@ -1761,26 +1633,10 @@ static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
        srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
        send_ioctx->cmd.tag = srp_tsk->tag;
        tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
-       if (tcm_tmr < 0) {
-               send_ioctx->cmd.se_tmr_req->response =
-                       TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
-               goto fail;
-       }
-       unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_tsk->lun,
-                                      sizeof(srp_tsk->lun));
-
-       if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK) {
-               rc = srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag);
-               if (rc < 0) {
-                       send_ioctx->cmd.se_tmr_req->response =
-                                       TMR_TASK_DOES_NOT_EXIST;
-                       goto fail;
-               }
-               tag = srp_tsk->task_tag;
-       }
-       rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL, unpacked_lun,
-                               srp_tsk, tcm_tmr, GFP_KERNEL, tag,
-                               TARGET_SCF_ACK_KREF);
+       rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL,
+                              scsilun_to_int(&srp_tsk->lun), srp_tsk, tcm_tmr,
+                              GFP_KERNEL, srp_tsk->task_tag,
+                              TARGET_SCF_ACK_KREF);
        if (rc != 0) {
                send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
                goto fail;
@@ -1800,7 +1656,6 @@ static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
                               struct srpt_send_ioctx *send_ioctx)
 {
        struct srp_cmd *srp_cmd;
-       enum rdma_ch_state ch_state;
 
        BUG_ON(!ch);
        BUG_ON(!recv_ioctx);
@@ -1809,13 +1664,12 @@ static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
                                   recv_ioctx->ioctx.dma, srp_max_req_size,
                                   DMA_FROM_DEVICE);
 
-       ch_state = srpt_get_ch_state(ch);
-       if (unlikely(ch_state == CH_CONNECTING)) {
+       if (unlikely(ch->state == CH_CONNECTING)) {
                list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
                goto out;
        }
 
-       if (unlikely(ch_state != CH_LIVE))
+       if (unlikely(ch->state != CH_LIVE))
                goto out;
 
        srp_cmd = recv_ioctx->ioctx.buf;
@@ -1878,6 +1732,28 @@ static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc)
        }
 }
 
+/*
+ * This function must be called from the context in which RDMA completions are
+ * processed because it accesses the wait list without protection against
+ * access from other threads.
+ */
+static void srpt_process_wait_list(struct srpt_rdma_ch *ch)
+{
+       struct srpt_send_ioctx *ioctx;
+
+       while (!list_empty(&ch->cmd_wait_list) &&
+              ch->state >= CH_LIVE &&
+              (ioctx = srpt_get_send_ioctx(ch)) != NULL) {
+               struct srpt_recv_ioctx *recv_ioctx;
+
+               recv_ioctx = list_first_entry(&ch->cmd_wait_list,
+                                             struct srpt_recv_ioctx,
+                                             wait_list);
+               list_del(&recv_ioctx->wait_list);
+               srpt_handle_new_iu(ch, recv_ioctx, ioctx);
+       }
+}
+
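Another caller that satisfies this requirement is the zero-length write completion handler which this patch installs as ch->zw_cqe.done further below; its body is not part of the hunks shown here. A minimal sketch of such a handler, assuming the completion queue's cq_context points at the channel:

	static void srpt_zerolength_write_done(struct ib_cq *cq, struct ib_wc *wc)
	{
		struct srpt_rdma_ch *ch = cq->cq_context;

		/* Runs in RDMA completion context, so the wait list may be
		 * touched without additional locking. */
		if (wc->status == IB_WC_SUCCESS)
			srpt_process_wait_list(ch);
		else if (srpt_set_ch_state(ch, CH_DISCONNECTED))
			schedule_work(&ch->release_work);
	}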
 /**
  * Note: Although this has not yet been observed during tests, at least in
  * theory it is possible that the srpt_get_send_ioctx() call invoked by
@@ -1905,15 +1781,10 @@ static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc)
 
        atomic_inc(&ch->sq_wr_avail);
 
-       if (wc->status != IB_WC_SUCCESS) {
+       if (wc->status != IB_WC_SUCCESS)
                pr_info("sending response for ioctx 0x%p failed"
                        " with status %d\n", ioctx, wc->status);
 
-               atomic_dec(&ch->req_lim);
-               srpt_abort_cmd(ioctx);
-               goto out;
-       }
-
        if (state != SRPT_STATE_DONE) {
                srpt_unmap_sg_to_ib_sge(ch, ioctx);
                transport_generic_free_cmd(&ioctx->cmd, 0);
@@ -1922,18 +1793,7 @@ static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc)
                       " wr_id = %u.\n", ioctx->ioctx.index);
        }
 
-out:
-       while (!list_empty(&ch->cmd_wait_list) &&
-              srpt_get_ch_state(ch) == CH_LIVE &&
-              (ioctx = srpt_get_send_ioctx(ch)) != NULL) {
-               struct srpt_recv_ioctx *recv_ioctx;
-
-               recv_ioctx = list_first_entry(&ch->cmd_wait_list,
-                                             struct srpt_recv_ioctx,
-                                             wait_list);
-               list_del(&recv_ioctx->wait_list);
-               srpt_handle_new_iu(ch, recv_ioctx, ioctx);
-       }
+       srpt_process_wait_list(ch);
 }
 
 /**
@@ -1950,7 +1810,7 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
        WARN_ON(ch->rq_size < 1);
 
        ret = -ENOMEM;
-       qp_init = kzalloc(sizeof *qp_init, GFP_KERNEL);
+       qp_init = kzalloc(sizeof(*qp_init), GFP_KERNEL);
        if (!qp_init)
                goto out;
 
@@ -2017,168 +1877,102 @@ static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch)
 }
 
 /**
- * __srpt_close_ch() - Close an RDMA channel by setting the QP error state.
+ * srpt_close_ch() - Close an RDMA channel.
  *
- * Reset the QP and make sure all resources associated with the channel will
- * be deallocated at an appropriate time.
+ * Make sure all resources associated with the channel will be deallocated at
+ * an appropriate time.
  *
- * Note: The caller must hold ch->sport->sdev->spinlock.
+ * Returns true if and only if the channel state has been modified into
+ * CH_DRAINING.
  */
-static void __srpt_close_ch(struct srpt_rdma_ch *ch)
+static bool srpt_close_ch(struct srpt_rdma_ch *ch)
 {
-       enum rdma_ch_state prev_state;
-       unsigned long flags;
+       int ret;
 
-       spin_lock_irqsave(&ch->spinlock, flags);
-       prev_state = ch->state;
-       switch (prev_state) {
-       case CH_CONNECTING:
-       case CH_LIVE:
-               ch->state = CH_DISCONNECTING;
-               break;
-       default:
-               break;
+       if (!srpt_set_ch_state(ch, CH_DRAINING)) {
+               pr_debug("%s-%d: already closed\n", ch->sess_name,
+                        ch->qp->qp_num);
+               return false;
        }
-       spin_unlock_irqrestore(&ch->spinlock, flags);
-
-       switch (prev_state) {
-       case CH_CONNECTING:
-               ib_send_cm_rej(ch->cm_id, IB_CM_REJ_NO_RESOURCES, NULL, 0,
-                              NULL, 0);
-               /* fall through */
-       case CH_LIVE:
-               if (ib_send_cm_dreq(ch->cm_id, NULL, 0) < 0)
-                       pr_err("sending CM DREQ failed.\n");
-               break;
-       case CH_DISCONNECTING:
-               break;
-       case CH_DRAINING:
-       case CH_RELEASING:
-               break;
-       }
-}
-
-/**
- * srpt_close_ch() - Close an RDMA channel.
- */
-static void srpt_close_ch(struct srpt_rdma_ch *ch)
-{
-       struct srpt_device *sdev;
 
-       sdev = ch->sport->sdev;
-       spin_lock_irq(&sdev->spinlock);
-       __srpt_close_ch(ch);
-       spin_unlock_irq(&sdev->spinlock);
-}
+       kref_get(&ch->kref);
 
-/**
- * srpt_shutdown_session() - Whether or not a session may be shut down.
- */
-static int srpt_shutdown_session(struct se_session *se_sess)
-{
-       struct srpt_rdma_ch *ch = se_sess->fabric_sess_ptr;
-       unsigned long flags;
+       ret = srpt_ch_qp_err(ch);
+       if (ret < 0)
+               pr_err("%s-%d: changing queue pair into error state failed: %d\n",
+                      ch->sess_name, ch->qp->qp_num, ret);
 
-       spin_lock_irqsave(&ch->spinlock, flags);
-       if (ch->in_shutdown) {
-               spin_unlock_irqrestore(&ch->spinlock, flags);
-               return true;
+       pr_debug("%s-%d: queued zerolength write\n", ch->sess_name,
+                ch->qp->qp_num);
+       ret = srpt_zerolength_write(ch);
+       if (ret < 0) {
+               pr_err("%s-%d: queuing zero-length write failed: %d\n",
+                      ch->sess_name, ch->qp->qp_num, ret);
+               if (srpt_set_ch_state(ch, CH_DISCONNECTED))
+                       schedule_work(&ch->release_work);
+               else
+                       WARN_ON_ONCE(true);
        }
 
-       ch->in_shutdown = true;
-       target_sess_cmd_list_set_waiting(se_sess);
-       spin_unlock_irqrestore(&ch->spinlock, flags);
+       kref_put(&ch->kref, srpt_free_ch);
 
        return true;
 }
 
-/**
- * srpt_drain_channel() - Drain a channel by resetting the IB queue pair.
- * @cm_id: Pointer to the CM ID of the channel to be drained.
- *
- * Note: Must be called from inside srpt_cm_handler to avoid a race between
- * accessing sdev->spinlock and the call to kfree(sdev) in srpt_remove_one()
- * (the caller of srpt_cm_handler holds the cm_id spinlock; srpt_remove_one()
- * waits until all target sessions for the associated IB device have been
- * unregistered and target session registration involves a call to
- * ib_destroy_cm_id(), which locks the cm_id spinlock and hence waits until
- * this function has finished).
+/*
+ * Change the channel state into CH_DISCONNECTING. If a channel has not yet
+ * reached the connected state, close it. If a channel is in the connected
+ * state, send a DREQ. If a DREQ has been received, send a DREP. Note: it is
+ * the responsibility of the caller to ensure that this function is not
+ * invoked concurrently with the code that accepts a connection. This means
+ * that this function must either be invoked from inside a CM callback
+ * function or with the srpt_port.mutex held.
  */
-static void srpt_drain_channel(struct ib_cm_id *cm_id)
+static int srpt_disconnect_ch(struct srpt_rdma_ch *ch)
 {
-       struct srpt_device *sdev;
-       struct srpt_rdma_ch *ch;
        int ret;
-       bool do_reset = false;
 
-       WARN_ON_ONCE(irqs_disabled());
+       if (!srpt_set_ch_state(ch, CH_DISCONNECTING))
+               return -ENOTCONN;
 
-       sdev = cm_id->context;
-       BUG_ON(!sdev);
-       spin_lock_irq(&sdev->spinlock);
-       list_for_each_entry(ch, &sdev->rch_list, list) {
-               if (ch->cm_id == cm_id) {
-                       do_reset = srpt_test_and_set_ch_state(ch,
-                                       CH_CONNECTING, CH_DRAINING) ||
-                                  srpt_test_and_set_ch_state(ch,
-                                       CH_LIVE, CH_DRAINING) ||
-                                  srpt_test_and_set_ch_state(ch,
-                                       CH_DISCONNECTING, CH_DRAINING);
-                       break;
-               }
-       }
-       spin_unlock_irq(&sdev->spinlock);
+       ret = ib_send_cm_dreq(ch->cm_id, NULL, 0);
+       if (ret < 0)
+               ret = ib_send_cm_drep(ch->cm_id, NULL, 0);
 
-       if (do_reset) {
-               if (ch->sess)
-                       srpt_shutdown_session(ch->sess);
+       if (ret < 0 && srpt_close_ch(ch))
+               ret = 0;
 
-               ret = srpt_ch_qp_err(ch);
-               if (ret < 0)
-                       pr_err("Setting queue pair in error state"
-                              " failed: %d\n", ret);
-       }
+       return ret;
 }
 
-/**
- * srpt_find_channel() - Look up an RDMA channel.
- * @cm_id: Pointer to the CM ID of the channel to be looked up.
- *
- * Return NULL if no matching RDMA channel has been found.
- */
-static struct srpt_rdma_ch *srpt_find_channel(struct srpt_device *sdev,
-                                             struct ib_cm_id *cm_id)
+static void __srpt_close_all_ch(struct srpt_device *sdev)
 {
        struct srpt_rdma_ch *ch;
-       bool found;
 
-       WARN_ON_ONCE(irqs_disabled());
-       BUG_ON(!sdev);
+       lockdep_assert_held(&sdev->mutex);
 
-       found = false;
-       spin_lock_irq(&sdev->spinlock);
        list_for_each_entry(ch, &sdev->rch_list, list) {
-               if (ch->cm_id == cm_id) {
-                       found = true;
-                       break;
-               }
+               if (srpt_disconnect_ch(ch) >= 0)
+                       pr_info("Closing channel %s-%d because target %s has been disabled\n",
+                               ch->sess_name, ch->qp->qp_num,
+                               sdev->device->name);
+               srpt_close_ch(ch);
        }
-       spin_unlock_irq(&sdev->spinlock);
-
-       return found ? ch : NULL;
 }
 
 /**
- * srpt_release_channel() - Release channel resources.
- *
- * Schedules the actual release because:
- * - Calling the ib_destroy_cm_id() call from inside an IB CM callback would
- *   trigger a deadlock.
- * - It is not safe to call TCM transport_* functions from interrupt context.
+ * srpt_shutdown_session() - Whether or not a session may be shut down.
  */
-static void srpt_release_channel(struct srpt_rdma_ch *ch)
+static int srpt_shutdown_session(struct se_session *se_sess)
+{
+       return 1;
+}
+
+static void srpt_free_ch(struct kref *kref)
 {
-       schedule_work(&ch->release_work);
+       struct srpt_rdma_ch *ch = container_of(kref, struct srpt_rdma_ch, kref);
+
+       kfree(ch);
 }
 
 static void srpt_release_channel_work(struct work_struct *w)
@@ -2188,8 +1982,8 @@ static void srpt_release_channel_work(struct work_struct *w)
        struct se_session *se_sess;
 
        ch = container_of(w, struct srpt_rdma_ch, release_work);
-       pr_debug("ch = %p; ch->sess = %p; release_done = %p\n", ch, ch->sess,
-                ch->release_done);
+       pr_debug("%s: %s-%d; release_done = %p\n", __func__, ch->sess_name,
+                ch->qp->qp_num, ch->release_done);
 
        sdev = ch->sport->sdev;
        BUG_ON(!sdev);
@@ -2197,6 +1991,7 @@ static void srpt_release_channel_work(struct work_struct *w)
        se_sess = ch->sess;
        BUG_ON(!se_sess);
 
+       target_sess_cmd_list_set_waiting(se_sess);
        target_wait_for_sess_cmds(se_sess);
 
        transport_deregister_session_configfs(se_sess);
@@ -2211,16 +2006,15 @@ static void srpt_release_channel_work(struct work_struct *w)
                             ch->sport->sdev, ch->rq_size,
                             ch->rsp_size, DMA_TO_DEVICE);
 
-       spin_lock_irq(&sdev->spinlock);
-       list_del(&ch->list);
-       spin_unlock_irq(&sdev->spinlock);
-
+       mutex_lock(&sdev->mutex);
+       list_del_init(&ch->list);
        if (ch->release_done)
                complete(ch->release_done);
+       mutex_unlock(&sdev->mutex);
 
        wake_up(&sdev->ch_releaseQ);
 
-       kfree(ch);
+       kref_put(&ch->kref, srpt_free_ch);
 }
 
 /**
@@ -2266,9 +2060,9 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
                be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[0]),
                be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[8]));
 
-       rsp = kzalloc(sizeof *rsp, GFP_KERNEL);
-       rej = kzalloc(sizeof *rej, GFP_KERNEL);
-       rep_param = kzalloc(sizeof *rep_param, GFP_KERNEL);
+       rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
+       rej = kzalloc(sizeof(*rej), GFP_KERNEL);
+       rep_param = kzalloc(sizeof(*rep_param), GFP_KERNEL);
 
        if (!rsp || !rej || !rep_param) {
                ret = -ENOMEM;
@@ -2297,7 +2091,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
        if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
                rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN;
 
-               spin_lock_irq(&sdev->spinlock);
+               mutex_lock(&sdev->mutex);
 
                list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list) {
                        if (!memcmp(ch->i_port_id, req->initiator_port_id, 16)
@@ -2305,26 +2099,16 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
                            && param->port == ch->sport->port
                            && param->listen_id == ch->sport->sdev->cm_id
                            && ch->cm_id) {
-                               enum rdma_ch_state ch_state;
-
-                               ch_state = srpt_get_ch_state(ch);
-                               if (ch_state != CH_CONNECTING
-                                   && ch_state != CH_LIVE)
+                               if (srpt_disconnect_ch(ch) < 0)
                                        continue;
-
-                               /* found an existing channel */
-                               pr_debug("Found existing channel %s"
-                                        " cm_id= %p state= %d\n",
-                                        ch->sess_name, ch->cm_id, ch_state);
-
-                               __srpt_close_ch(ch);
-
+                               pr_info("Relogin - closed existing channel %s\n",
+                                       ch->sess_name);
                                rsp->rsp_flags =
                                        SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
                        }
                }
 
-               spin_unlock_irq(&sdev->spinlock);
+               mutex_unlock(&sdev->mutex);
 
        } else
                rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
@@ -2340,7 +2124,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
                goto reject;
        }
 
-       ch = kzalloc(sizeof *ch, GFP_KERNEL);
+       ch = kzalloc(sizeof(*ch), GFP_KERNEL);
        if (!ch) {
                rej->reason = cpu_to_be32(
                              SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
@@ -2349,11 +2133,14 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
                goto reject;
        }
 
+       kref_init(&ch->kref);
+       ch->zw_cqe.done = srpt_zerolength_write_done;
        INIT_WORK(&ch->release_work, srpt_release_channel_work);
        memcpy(ch->i_port_id, req->initiator_port_id, 16);
        memcpy(ch->t_port_id, req->target_port_id, 16);
        ch->sport = &sdev->port[param->port - 1];
        ch->cm_id = cm_id;
+       cm_id->context = ch;
        /*
         * Avoid QUEUE_FULL conditions by limiting the number of buffers used
         * for the SRP protocol to the command queue size.
@@ -2453,7 +2240,7 @@ try_again:
        /* create cm reply */
        rep_param->qp_num = ch->qp->qp_num;
        rep_param->private_data = (void *)rsp;
-       rep_param->private_data_len = sizeof *rsp;
+       rep_param->private_data_len = sizeof(*rsp);
        rep_param->rnr_retry_count = 7;
        rep_param->flow_control = 1;
        rep_param->failover_accepted = 0;
@@ -2468,14 +2255,14 @@ try_again:
                goto release_channel;
        }
 
-       spin_lock_irq(&sdev->spinlock);
+       mutex_lock(&sdev->mutex);
        list_add_tail(&ch->list, &sdev->rch_list);
-       spin_unlock_irq(&sdev->spinlock);
+       mutex_unlock(&sdev->mutex);
 
        goto out;
 
 release_channel:
-       srpt_set_ch_state(ch, CH_RELEASING);
+       srpt_disconnect_ch(ch);
        transport_deregister_session_configfs(ch->sess);
        transport_deregister_session(ch->sess);
        ch->sess = NULL;
@@ -2497,7 +2284,7 @@ reject:
                                   | SRP_BUF_FORMAT_INDIRECT);
 
        ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
-                            (void *)rej, sizeof *rej);
+                            (void *)rej, sizeof(*rej));
 
 out:
        kfree(rep_param);
@@ -2507,10 +2294,23 @@ out:
        return ret;
 }
 
-static void srpt_cm_rej_recv(struct ib_cm_id *cm_id)
+static void srpt_cm_rej_recv(struct srpt_rdma_ch *ch,
+                            enum ib_cm_rej_reason reason,
+                            const u8 *private_data,
+                            u8 private_data_len)
 {
-       pr_info("Received IB REJ for cm_id %p.\n", cm_id);
-       srpt_drain_channel(cm_id);
+       char *priv = NULL;
+       int i;
+
+       if (private_data_len && (priv = kmalloc(private_data_len * 3 + 1,
+                                               GFP_KERNEL))) {
+               for (i = 0; i < private_data_len; i++)
+                       sprintf(priv + 3 * i, " %02x", private_data[i]);
+       }
+       pr_info("Received CM REJ for ch %s-%d; reason %d%s%s.\n",
+               ch->sess_name, ch->qp->qp_num, reason, private_data_len ?
+               "; private data" : "", priv ? priv : " (?)");
+       kfree(priv);
 }
 
 /**
@@ -2519,87 +2319,23 @@ static void srpt_cm_rej_recv(struct ib_cm_id *cm_id)
  * An IB_CM_RTU_RECEIVED message indicates that the connection is established
  * and that the recipient may begin transmitting (RTU = ready to use).
  */
-static void srpt_cm_rtu_recv(struct ib_cm_id *cm_id)
+static void srpt_cm_rtu_recv(struct srpt_rdma_ch *ch)
 {
-       struct srpt_rdma_ch *ch;
        int ret;
 
-       ch = srpt_find_channel(cm_id->context, cm_id);
-       BUG_ON(!ch);
-
-       if (srpt_test_and_set_ch_state(ch, CH_CONNECTING, CH_LIVE)) {
-               struct srpt_recv_ioctx *ioctx, *ioctx_tmp;
-
+       if (srpt_set_ch_state(ch, CH_LIVE)) {
                ret = srpt_ch_qp_rts(ch, ch->qp);
 
-               list_for_each_entry_safe(ioctx, ioctx_tmp, &ch->cmd_wait_list,
-                                        wait_list) {
-                       list_del(&ioctx->wait_list);
-                       srpt_handle_new_iu(ch, ioctx, NULL);
-               }
-               if (ret)
+               if (ret == 0) {
+                       /* Trigger wait list processing. */
+                       ret = srpt_zerolength_write(ch);
+                       WARN_ONCE(ret < 0, "%d\n", ret);
+               } else {
                        srpt_close_ch(ch);
+               }
        }
 }
 
-static void srpt_cm_timewait_exit(struct ib_cm_id *cm_id)
-{
-       pr_info("Received IB TimeWait exit for cm_id %p.\n", cm_id);
-       srpt_drain_channel(cm_id);
-}
-
-static void srpt_cm_rep_error(struct ib_cm_id *cm_id)
-{
-       pr_info("Received IB REP error for cm_id %p.\n", cm_id);
-       srpt_drain_channel(cm_id);
-}
-
-/**
- * srpt_cm_dreq_recv() - Process reception of a DREQ message.
- */
-static void srpt_cm_dreq_recv(struct ib_cm_id *cm_id)
-{
-       struct srpt_rdma_ch *ch;
-       unsigned long flags;
-       bool send_drep = false;
-
-       ch = srpt_find_channel(cm_id->context, cm_id);
-       BUG_ON(!ch);
-
-       pr_debug("cm_id= %p ch->state= %d\n", cm_id, srpt_get_ch_state(ch));
-
-       spin_lock_irqsave(&ch->spinlock, flags);
-       switch (ch->state) {
-       case CH_CONNECTING:
-       case CH_LIVE:
-               send_drep = true;
-               ch->state = CH_DISCONNECTING;
-               break;
-       case CH_DISCONNECTING:
-       case CH_DRAINING:
-       case CH_RELEASING:
-               WARN(true, "unexpected channel state %d\n", ch->state);
-               break;
-       }
-       spin_unlock_irqrestore(&ch->spinlock, flags);
-
-       if (send_drep) {
-               if (ib_send_cm_drep(ch->cm_id, NULL, 0) < 0)
-                       pr_err("Sending IB DREP failed.\n");
-               pr_info("Received DREQ and sent DREP for session %s.\n",
-                       ch->sess_name);
-       }
-}
-
-/**
- * srpt_cm_drep_recv() - Process reception of a DREP message.
- */
-static void srpt_cm_drep_recv(struct ib_cm_id *cm_id)
-{
-       pr_info("Received InfiniBand DREP message for cm_id %p.\n", cm_id);
-       srpt_drain_channel(cm_id);
-}
-
 /**
  * srpt_cm_handler() - IB connection manager callback function.
  *
@@ -2612,6 +2348,7 @@ static void srpt_cm_drep_recv(struct ib_cm_id *cm_id)
  */
 static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
 {
+       struct srpt_rdma_ch *ch = cm_id->context;
        int ret;
 
        ret = 0;
@@ -2621,32 +2358,39 @@ static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
                                       event->private_data);
                break;
        case IB_CM_REJ_RECEIVED:
-               srpt_cm_rej_recv(cm_id);
+               srpt_cm_rej_recv(ch, event->param.rej_rcvd.reason,
+                                event->private_data,
+                                IB_CM_REJ_PRIVATE_DATA_SIZE);
                break;
        case IB_CM_RTU_RECEIVED:
        case IB_CM_USER_ESTABLISHED:
-               srpt_cm_rtu_recv(cm_id);
+               srpt_cm_rtu_recv(ch);
                break;
        case IB_CM_DREQ_RECEIVED:
-               srpt_cm_dreq_recv(cm_id);
+               srpt_disconnect_ch(ch);
                break;
        case IB_CM_DREP_RECEIVED:
-               srpt_cm_drep_recv(cm_id);
+               pr_info("Received CM DREP message for ch %s-%d.\n",
+                       ch->sess_name, ch->qp->qp_num);
+               srpt_close_ch(ch);
                break;
        case IB_CM_TIMEWAIT_EXIT:
-               srpt_cm_timewait_exit(cm_id);
+               pr_info("Received CM TimeWait exit for ch %s-%d.\n",
+                       ch->sess_name, ch->qp->qp_num);
+               srpt_close_ch(ch);
                break;
        case IB_CM_REP_ERROR:
-               srpt_cm_rep_error(cm_id);
+               pr_info("Received CM REP error for ch %s-%d.\n", ch->sess_name,
+                       ch->qp->qp_num);
                break;
        case IB_CM_DREQ_ERROR:
-               pr_info("Received IB DREQ ERROR event.\n");
+               pr_info("Received CM DREQ ERROR event.\n");
                break;
        case IB_CM_MRA_RECEIVED:
-               pr_info("Received IB MRA event\n");
+               pr_info("Received CM MRA event\n");
                break;
        default:
-               pr_err("received unrecognized IB CM event %d\n", event->event);
+               pr_err("received unrecognized CM event %d\n", event->event);
                break;
        }
 
@@ -2755,41 +2499,14 @@ static int srpt_write_pending_status(struct se_cmd *se_cmd)
  */
 static int srpt_write_pending(struct se_cmd *se_cmd)
 {
-       struct srpt_rdma_ch *ch;
-       struct srpt_send_ioctx *ioctx;
+       struct srpt_send_ioctx *ioctx =
+               container_of(se_cmd, struct srpt_send_ioctx, cmd);
+       struct srpt_rdma_ch *ch = ioctx->ch;
        enum srpt_command_state new_state;
-       enum rdma_ch_state ch_state;
-       int ret;
-
-       ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
 
        new_state = srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA);
        WARN_ON(new_state == SRPT_STATE_DONE);
-
-       ch = ioctx->ch;
-       BUG_ON(!ch);
-
-       ch_state = srpt_get_ch_state(ch);
-       switch (ch_state) {
-       case CH_CONNECTING:
-               WARN(true, "unexpected channel state %d\n", ch_state);
-               ret = -EINVAL;
-               goto out;
-       case CH_LIVE:
-               break;
-       case CH_DISCONNECTING:
-       case CH_DRAINING:
-       case CH_RELEASING:
-               pr_debug("cmd with tag %lld: channel disconnecting\n",
-                        ioctx->cmd.tag);
-               srpt_set_cmd_state(ioctx, SRPT_STATE_DATA_IN);
-               ret = -EINVAL;
-               goto out;
-       }
-       ret = srpt_xfer_data(ch, ioctx);
-
-out:
-       return ret;
+       return srpt_xfer_data(ch, ioctx);
 }
 
 static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status)
@@ -2920,36 +2637,25 @@ static void srpt_refresh_port_work(struct work_struct *work)
        srpt_refresh_port(sport);
 }
 
-static int srpt_ch_list_empty(struct srpt_device *sdev)
-{
-       int res;
-
-       spin_lock_irq(&sdev->spinlock);
-       res = list_empty(&sdev->rch_list);
-       spin_unlock_irq(&sdev->spinlock);
-
-       return res;
-}
-
 /**
  * srpt_release_sdev() - Free the channel resources associated with a target.
  */
 static int srpt_release_sdev(struct srpt_device *sdev)
 {
-       struct srpt_rdma_ch *ch, *tmp_ch;
-       int res;
+       int i, res;
 
        WARN_ON_ONCE(irqs_disabled());
 
        BUG_ON(!sdev);
 
-       spin_lock_irq(&sdev->spinlock);
-       list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list)
-               __srpt_close_ch(ch);
-       spin_unlock_irq(&sdev->spinlock);
+       mutex_lock(&sdev->mutex);
+       for (i = 0; i < ARRAY_SIZE(sdev->port); i++)
+               sdev->port[i].enabled = false;
+       __srpt_close_all_ch(sdev);
+       mutex_unlock(&sdev->mutex);
 
        res = wait_event_interruptible(sdev->ch_releaseQ,
-                                      srpt_ch_list_empty(sdev));
+                                      list_empty_careful(&sdev->rch_list));
        if (res)
                pr_err("%s: interrupted.\n", __func__);
 
@@ -3003,14 +2709,14 @@ static void srpt_add_one(struct ib_device *device)
        pr_debug("device = %p, device->dma_ops = %p\n", device,
                 device->dma_ops);
 
-       sdev = kzalloc(sizeof *sdev, GFP_KERNEL);
+       sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
        if (!sdev)
                goto err;
 
        sdev->device = device;
        INIT_LIST_HEAD(&sdev->rch_list);
        init_waitqueue_head(&sdev->ch_releaseQ);
-       spin_lock_init(&sdev->spinlock);
+       mutex_init(&sdev->mutex);
 
        sdev->pd = ib_alloc_pd(device);
        if (IS_ERR(sdev->pd))
@@ -3082,7 +2788,7 @@ static void srpt_add_one(struct ib_device *device)
 
                if (srpt_refresh_port(sport)) {
                        pr_err("MAD registration failed for %s-%d.\n",
-                              srpt_sdev_name(sdev), i);
+                              sdev->device->name, i);
                        goto err_ring;
                }
                snprintf(sport->port_guid, sizeof(sport->port_guid),
@@ -3231,24 +2937,26 @@ static void srpt_release_cmd(struct se_cmd *se_cmd)
 static void srpt_close_session(struct se_session *se_sess)
 {
        DECLARE_COMPLETION_ONSTACK(release_done);
-       struct srpt_rdma_ch *ch;
-       struct srpt_device *sdev;
-       unsigned long res;
-
-       ch = se_sess->fabric_sess_ptr;
-       WARN_ON(ch->sess != se_sess);
+       struct srpt_rdma_ch *ch = se_sess->fabric_sess_ptr;
+       struct srpt_device *sdev = ch->sport->sdev;
+       bool wait;
 
-       pr_debug("ch %p state %d\n", ch, srpt_get_ch_state(ch));
+       pr_debug("ch %s-%d state %d\n", ch->sess_name, ch->qp->qp_num,
+                ch->state);
 
-       sdev = ch->sport->sdev;
-       spin_lock_irq(&sdev->spinlock);
+       mutex_lock(&sdev->mutex);
        BUG_ON(ch->release_done);
        ch->release_done = &release_done;
-       __srpt_close_ch(ch);
-       spin_unlock_irq(&sdev->spinlock);
+       wait = !list_empty(&ch->list);
+       srpt_disconnect_ch(ch);
+       mutex_unlock(&sdev->mutex);
 
-       res = wait_for_completion_timeout(&release_done, 60 * HZ);
-       WARN_ON(res == 0);
+       if (!wait)
+               return;
+
+       while (wait_for_completion_timeout(&release_done, 180 * HZ) == 0)
+               pr_info("%s(%s-%d state %d): still waiting ...\n", __func__,
+                       ch->sess_name, ch->qp->qp_num, ch->state);
 }
 
 /**
@@ -3456,6 +3164,8 @@ static ssize_t srpt_tpg_enable_store(struct config_item *item,
 {
        struct se_portal_group *se_tpg = to_tpg(item);
        struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
+       struct srpt_device *sdev = sport->sdev;
+       struct srpt_rdma_ch *ch;
        unsigned long tmp;
         int ret;
 
@@ -3469,11 +3179,24 @@ static ssize_t srpt_tpg_enable_store(struct config_item *item,
                pr_err("Illegal value for srpt_tpg_store_enable: %lu\n", tmp);
                return -EINVAL;
        }
-       if (tmp == 1)
-               sport->enabled = true;
-       else
-               sport->enabled = false;
+       if (sport->enabled == tmp)
+               goto out;
+       sport->enabled = tmp;
+       if (sport->enabled)
+               goto out;
+
+       mutex_lock(&sdev->mutex);
+       list_for_each_entry(ch, &sdev->rch_list, list) {
+               if (ch->sport == sport) {
+                       pr_debug("%s: ch %p %s-%d\n", __func__, ch,
+                                ch->sess_name, ch->qp->qp_num);
+                       srpt_disconnect_ch(ch);
+                       srpt_close_ch(ch);
+               }
+       }
+       mutex_unlock(&sdev->mutex);
 
+out:
        return count;
 }
 
@@ -3565,7 +3288,6 @@ static struct configfs_attribute *srpt_wwn_attrs[] = {
 static const struct target_core_fabric_ops srpt_template = {
        .module                         = THIS_MODULE,
        .name                           = "srpt",
-       .node_acl_size                  = sizeof(struct srpt_node_acl),
        .get_fabric_name                = srpt_get_fabric_name,
        .tpg_get_wwn                    = srpt_get_fabric_wwn,
        .tpg_get_tag                    = srpt_get_tag,
index 09037f2b0b51430d568e847e7ebd5f24e7fd906e..af9b8b527340c80f4c8af515cc4aa641a5c5b426 100644 (file)
@@ -218,20 +218,20 @@ struct srpt_send_ioctx {
 
 /**
  * enum rdma_ch_state - SRP channel state.
- * @CH_CONNECTING:      QP is in RTR state; waiting for RTU.
- * @CH_LIVE:            QP is in RTS state.
- * @CH_DISCONNECTING:    DREQ has been received; waiting for DREP
- *                       or DREQ has been send and waiting for DREP
- *                       or .
- * @CH_DRAINING:        QP is in ERR state; waiting for last WQE event.
- * @CH_RELEASING:       Last WQE event has been received; releasing resources.
+ * @CH_CONNECTING:    QP is in RTR state; waiting for RTU.
+ * @CH_LIVE:         QP is in RTS state.
+ * @CH_DISCONNECTING: DREQ has been sent and waiting for DREP or DREQ has
+ *                    been received.
+ * @CH_DRAINING:      DREP has been received or waiting for DREP timed out
+ *                    and last work request has been queued.
+ * @CH_DISCONNECTED:  Last completion has been received.
  */
 enum rdma_ch_state {
        CH_CONNECTING,
        CH_LIVE,
        CH_DISCONNECTING,
        CH_DRAINING,
-       CH_RELEASING
+       CH_DISCONNECTED,
 };
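Read together, the kernel-doc above implies that a channel only ever advances through these states (intermediate states may be skipped); as a summary, not literal code from the patch:

	CH_CONNECTING -> CH_LIVE -> CH_DISCONNECTING -> CH_DRAINING -> CH_DISCONNECTED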
 
 /**
@@ -267,6 +267,8 @@ struct srpt_rdma_ch {
        struct ib_cm_id         *cm_id;
        struct ib_qp            *qp;
        struct ib_cq            *cq;
+       struct ib_cqe           zw_cqe;
+       struct kref             kref;
        int                     rq_size;
        u32                     rsp_size;
        atomic_t                sq_wr_avail;
@@ -286,7 +288,6 @@ struct srpt_rdma_ch {
        u8                      sess_name[36];
        struct work_struct      release_work;
        struct completion       *release_done;
-       bool                    in_shutdown;
 };
 
 /**
@@ -343,7 +344,7 @@ struct srpt_port {
  * @ioctx_ring:    Per-HCA SRQ.
  * @rch_list:      Per-device channel list -- see also srpt_rdma_ch.list.
  * @ch_releaseQ:   Enables waiting for removal from rch_list.
- * @spinlock:      Protects rch_list and tpg.
+ * @mutex:         Protects rch_list.
  * @port:          Information about the ports owned by this HCA.
  * @event_handler: Per-HCA asynchronous IB event handler.
  * @list:          Node in srpt_dev_list.
@@ -357,18 +358,10 @@ struct srpt_device {
        struct srpt_recv_ioctx  **ioctx_ring;
        struct list_head        rch_list;
        wait_queue_head_t       ch_releaseQ;
-       spinlock_t              spinlock;
+       struct mutex            mutex;
        struct srpt_port        port[2];
        struct ib_event_handler event_handler;
        struct list_head        list;
 };
 
-/**
- * struct srpt_node_acl - Per-initiator ACL data (managed via configfs).
- * @nacl:      Target core node ACL information.
- */
-struct srpt_node_acl {
-       struct se_node_acl      nacl;
-};
-
 #endif                         /* IB_SRPT_H */
index a072d341e205bf272418d0104b084a0898281e7e..1d2d1da40c80a98ee0aa95c962952cadb46172de 100644 (file)
@@ -1021,6 +1021,8 @@ struct cpl_l2t_write_req {
 #define L2T_W_NOREPLY_V(x) ((x) << L2T_W_NOREPLY_S)
 #define L2T_W_NOREPLY_F    L2T_W_NOREPLY_V(1U)
 
+#define CPL_L2T_VLAN_NONE 0xfff
+
 struct cpl_l2t_write_rpl {
        union opcode_tid ot;
        u8 status;
index a32de30ea663b396833305cc6a51339c7f7c05c8..c8661c77b4e3da341a63996c0a557aab128305c6 100644 (file)
@@ -561,6 +561,7 @@ enum fw_flowc_mnem {
        FW_FLOWC_MNEM_SNDBUF,
        FW_FLOWC_MNEM_MSS,
        FW_FLOWC_MNEM_TXDATAPLEN_MAX,
+       FW_FLOWC_MNEM_SCHEDCLASS = 11,
 };
 
 struct fw_flowc_mnemval {
index b4729ba57c9c1e88f10fa2ceaecd32821e7d18e7..3b3c63e54ed638f03278506b91c0eaa854a6c575 100644 (file)
@@ -41,6 +41,7 @@ i40e-objs := i40e_main.o \
        i40e_diag.o     \
        i40e_txrx.o     \
        i40e_ptp.o      \
+       i40e_client.o   \
        i40e_virtchnl_pf.o
 
 i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o
index 68f2204ec6f3aa712ad8768f2985c46c9268df0c..e734c649227d9d255dddff79bb898c70c2ed40d8 100644 (file)
@@ -58,6 +58,7 @@
 #ifdef I40E_FCOE
 #include "i40e_fcoe.h"
 #endif
+#include "i40e_client.h"
 #include "i40e_virtchnl.h"
 #include "i40e_virtchnl_pf.h"
 #include "i40e_txrx.h"
@@ -178,6 +179,7 @@ struct i40e_lump_tracking {
        u16 search_hint;
        u16 list[0];
 #define I40E_PILE_VALID_BIT  0x8000
+#define I40E_IWARP_IRQ_PILE_ID  (I40E_PILE_VALID_BIT - 2)
 };
 
 #define I40E_DEFAULT_ATR_SAMPLE_RATE   20
@@ -270,6 +272,8 @@ struct i40e_pf {
 #endif /* I40E_FCOE */
        u16 num_lan_qps;           /* num lan queues this PF has set up */
        u16 num_lan_msix;          /* num queue vectors for the base PF vsi */
+       u16 num_iwarp_msix;        /* num of iwarp vectors for this PF */
+       int iwarp_base_vector;
        int queues_left;           /* queues left unclaimed */
        u16 alloc_rss_size;        /* allocated RSS queues */
        u16 rss_size_max;          /* HW defined max RSS queues */
@@ -317,6 +321,7 @@ struct i40e_pf {
 #define I40E_FLAG_16BYTE_RX_DESC_ENABLED       BIT_ULL(13)
 #define I40E_FLAG_CLEAN_ADMINQ                 BIT_ULL(14)
 #define I40E_FLAG_FILTER_SYNC                  BIT_ULL(15)
+#define I40E_FLAG_SERVICE_CLIENT_REQUESTED     BIT_ULL(16)
 #define I40E_FLAG_PROCESS_MDD_EVENT            BIT_ULL(17)
 #define I40E_FLAG_PROCESS_VFLR_EVENT           BIT_ULL(18)
 #define I40E_FLAG_SRIOV_ENABLED                        BIT_ULL(19)
@@ -557,6 +562,8 @@ struct i40e_vsi {
        struct kobject *kobj;  /* sysfs object */
        bool current_isup;     /* Sync 'link up' logging */
 
+       void *priv;     /* client driver data reference. */
+
        /* VSI specific handlers */
        irqreturn_t (*irq_handler)(int irq, void *data);
 
@@ -714,6 +721,10 @@ void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
                              struct i40e_vsi_context *ctxt,
                              u8 enabled_tc, bool is_add);
 #endif
+void i40e_service_event_schedule(struct i40e_pf *pf);
+void i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id,
+                                 u8 *msg, u16 len);
+
 int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool enable);
 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count);
 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid,
@@ -736,6 +747,17 @@ static inline void i40e_dbg_pf_exit(struct i40e_pf *pf) {}
 static inline void i40e_dbg_init(void) {}
 static inline void i40e_dbg_exit(void) {}
 #endif /* CONFIG_DEBUG_FS*/
+/* needed by client drivers */
+int i40e_lan_add_device(struct i40e_pf *pf);
+int i40e_lan_del_device(struct i40e_pf *pf);
+void i40e_client_subtask(struct i40e_pf *pf);
+void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi);
+void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi);
+void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset);
+void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs);
+void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id);
+int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id,
+                          enum i40e_client_type type);
 /**
  * i40e_irq_dynamic_enable - Enable default interrupt generation settings
  * @vsi: pointer to a vsi
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
new file mode 100644 (file)
index 0000000..0e6ac84
--- /dev/null
@@ -0,0 +1,1012 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#include <linux/list.h>
+#include <linux/errno.h>
+
+#include "i40e.h"
+#include "i40e_prototype.h"
+#include "i40e_client.h"
+
+static const char i40e_client_interface_version_str[] = I40E_CLIENT_VERSION_STR;
+
+static LIST_HEAD(i40e_devices);
+static DEFINE_MUTEX(i40e_device_mutex);
+
+static LIST_HEAD(i40e_clients);
+static DEFINE_MUTEX(i40e_client_mutex);
+
+static LIST_HEAD(i40e_client_instances);
+static DEFINE_MUTEX(i40e_client_instance_mutex);
+
+static int i40e_client_virtchnl_send(struct i40e_info *ldev,
+                                    struct i40e_client *client,
+                                    u32 vf_id, u8 *msg, u16 len);
+
+static int i40e_client_setup_qvlist(struct i40e_info *ldev,
+                                   struct i40e_client *client,
+                                   struct i40e_qvlist_info *qvlist_info);
+
+static void i40e_client_request_reset(struct i40e_info *ldev,
+                                     struct i40e_client *client,
+                                     u32 reset_level);
+
+static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
+                                      struct i40e_client *client,
+                                      bool is_vf, u32 vf_id,
+                                      u32 flag, u32 valid_flag);
+
+static struct i40e_ops i40e_lan_ops = {
+       .virtchnl_send = i40e_client_virtchnl_send,
+       .setup_qvlist = i40e_client_setup_qvlist,
+       .request_reset = i40e_client_request_reset,
+       .update_vsi_ctxt = i40e_client_update_vsi_ctxt,
+};
+
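The ops table above is what the LAN driver exposes to each client instance. For orientation, a hypothetical client module could register itself roughly as follows; this assumes the i40e_register_client()/i40e_unregister_client() entry points and the struct i40e_client layout declared in i40e_client.h, which are not part of the hunks shown here:

	static struct i40e_client_ops my_client_ops = {
		/* .open, .close, .l2_param_change, .virtchnl_receive, ... */
	};

	static struct i40e_client my_client = {
		.name = "my_iwarp_client",	/* hypothetical name */
		.ops  = &my_client_ops,
		.type = I40E_CLIENT_IWARP,
	};

	static int __init my_client_init(void)
	{
		return i40e_register_client(&my_client);
	}

	static void __exit my_client_exit(void)
	{
		i40e_unregister_client(&my_client);
	}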
+/**
+ * i40e_client_type_to_vsi_type - convert client type to vsi type
+ * @type: the i40e_client type
+ *
+ * returns the related vsi type value
+ **/
+static
+enum i40e_vsi_type i40e_client_type_to_vsi_type(enum i40e_client_type type)
+{
+       switch (type) {
+       case I40E_CLIENT_IWARP:
+               return I40E_VSI_IWARP;
+
+       case I40E_CLIENT_VMDQ2:
+               return I40E_VSI_VMDQ2;
+
+       default:
+               pr_err("i40e: Client type unknown\n");
+               return I40E_VSI_TYPE_UNKNOWN;
+       }
+}
+
+/**
+ * i40e_client_get_params - Get the params that can change at runtime
+ * @vsi: the VSI to get the parameters for
+ * @params: client param struct
+ *
+ **/
+static
+int i40e_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params)
+{
+       struct i40e_dcbx_config *dcb_cfg = &vsi->back->hw.local_dcbx_config;
+       int i = 0;
+
+       for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+               u8 tc = dcb_cfg->etscfg.prioritytable[i];
+               u16 qs_handle;
+
+               /* If TC is not enabled for VSI use TC0 for UP */
+               if (!(vsi->tc_config.enabled_tc & BIT(tc)))
+                       tc = 0;
+
+               qs_handle = le16_to_cpu(vsi->info.qs_handle[tc]);
+               params->qos.prio_qos[i].tc = tc;
+               params->qos.prio_qos[i].qs_handle = qs_handle;
+               if (qs_handle == I40E_AQ_VSI_QS_HANDLE_INVALID) {
+                       dev_err(&vsi->back->pdev->dev, "Invalid queue set handle for TC = %d, vsi id = %d\n",
+                               tc, vsi->id);
+                       return -EINVAL;
+               }
+       }
+
+       params->mtu = vsi->netdev->mtu;
+       return 0;
+}
+
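A worked example of the mapping above, with hypothetical DCB values:

	/* Suppose dcb_cfg->etscfg.prioritytable[5] == 3 but TC3 is not set in
	 * vsi->tc_config.enabled_tc: user priority 5 then falls back to TC0,
	 * so params->qos.prio_qos[5].tc ends up as 0 and its qs_handle is
	 * taken from vsi->info.qs_handle[0].
	 */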
+/**
+ * i40e_notify_client_of_vf_msg - call the client vf message callback
+ * @vsi: the VSI with the message
+ * @vf_id: the absolute VF id that sent the message
+ * @msg: message buffer
+ * @len: length of the message
+ *
+ * If there is a client to this VSI, call the client
+ **/
+void
+i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id, u8 *msg, u16 len)
+{
+       struct i40e_client_instance *cdev;
+
+       if (!vsi)
+               return;
+       mutex_lock(&i40e_client_instance_mutex);
+       list_for_each_entry(cdev, &i40e_client_instances, list) {
+               if (cdev->lan_info.pf == vsi->back) {
+                       if (!cdev->client ||
+                           !cdev->client->ops ||
+                           !cdev->client->ops->virtchnl_receive) {
+                               dev_dbg(&vsi->back->pdev->dev,
+                                       "Cannot locate client instance virtual channel receive routine\n");
+                               continue;
+                       }
+                       cdev->client->ops->virtchnl_receive(&cdev->lan_info,
+                                                           cdev->client,
+                                                           vf_id, msg, len);
+               }
+       }
+       mutex_unlock(&i40e_client_instance_mutex);
+}
+
+/**
+ * i40e_notify_client_of_l2_param_changes - call the client notify callback
+ * @vsi: the VSI with l2 param changes
+ *
+ * If there is a client to this VSI, call the client
+ **/
+void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi)
+{
+       struct i40e_client_instance *cdev;
+       struct i40e_params params;
+
+       if (!vsi)
+               return;
+       memset(&params, 0, sizeof(params));
+       i40e_client_get_params(vsi, &params);
+       mutex_lock(&i40e_client_instance_mutex);
+       list_for_each_entry(cdev, &i40e_client_instances, list) {
+               if (cdev->lan_info.pf == vsi->back) {
+                       if (!cdev->client ||
+                           !cdev->client->ops ||
+                           !cdev->client->ops->l2_param_change) {
+                               dev_dbg(&vsi->back->pdev->dev,
+                                       "Cannot locate client instance l2_param_change routine\n");
+                               continue;
+                       }
+                       cdev->lan_info.params = params;
+                       cdev->client->ops->l2_param_change(&cdev->lan_info,
+                                                          cdev->client,
+                                                          &params);
+               }
+       }
+       mutex_unlock(&i40e_client_instance_mutex);
+}
+
+/**
+ * i40e_notify_client_of_netdev_open - call the client open callback
+ * @vsi: the VSI with netdev opened
+ *
+ * If there is a client to this netdev, call the client with open
+ **/
+void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi)
+{
+       struct i40e_client_instance *cdev;
+
+       if (!vsi)
+               return;
+       mutex_lock(&i40e_client_instance_mutex);
+       list_for_each_entry(cdev, &i40e_client_instances, list) {
+               if (cdev->lan_info.netdev == vsi->netdev) {
+                       if (!cdev->client ||
+                           !cdev->client->ops || !cdev->client->ops->open) {
+                               dev_dbg(&vsi->back->pdev->dev,
+                                       "Cannot locate client instance open routine\n");
+                               continue;
+                       }
+                       cdev->client->ops->open(&cdev->lan_info, cdev->client);
+               }
+       }
+       mutex_unlock(&i40e_client_instance_mutex);
+}
+
+/**
+ * i40e_client_release_qvlist
+ * @ldev: pointer to L2 context.
+ *
+ **/
+static void i40e_client_release_qvlist(struct i40e_info *ldev)
+{
+       struct i40e_qvlist_info *qvlist_info = ldev->qvlist_info;
+       u32 i;
+
+       if (!ldev->qvlist_info)
+               return;
+
+       for (i = 0; i < qvlist_info->num_vectors; i++) {
+               struct i40e_pf *pf = ldev->pf;
+               struct i40e_qv_info *qv_info;
+               u32 reg_idx;
+
+               qv_info = &qvlist_info->qv_info[i];
+               if (!qv_info)
+                       continue;
+               reg_idx = I40E_PFINT_LNKLSTN(qv_info->v_idx - 1);
+               wr32(&pf->hw, reg_idx, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
+       }
+       kfree(ldev->qvlist_info);
+       ldev->qvlist_info = NULL;
+}
+
+/**
+ * i40e_notify_client_of_netdev_close - call the client close callback
+ * @vsi: the VSI with netdev closed
+ * @reset: true when close called due to a reset pending
+ *
+ * If there is a client to this netdev, call the client with close
+ **/
+void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset)
+{
+       struct i40e_client_instance *cdev;
+
+       if (!vsi)
+               return;
+       mutex_lock(&i40e_client_instance_mutex);
+       list_for_each_entry(cdev, &i40e_client_instances, list) {
+               if (cdev->lan_info.netdev == vsi->netdev) {
+                       if (!cdev->client ||
+                           !cdev->client->ops || !cdev->client->ops->close) {
+                               dev_dbg(&vsi->back->pdev->dev,
+                                       "Cannot locate client instance close routine\n");
+                               continue;
+                       }
+                       cdev->client->ops->close(&cdev->lan_info, cdev->client,
+                                                reset);
+                       i40e_client_release_qvlist(&cdev->lan_info);
+               }
+       }
+       mutex_unlock(&i40e_client_instance_mutex);
+}
+
+/**
+ * i40e_notify_client_of_vf_reset - call the client vf reset callback
+ * @pf: PF device pointer
+ * @vf_id: absolute id of the VF being reset
+ *
+ * If there is a client attached to this PF, notify when a VF is reset
+ **/
+void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id)
+{
+       struct i40e_client_instance *cdev;
+
+       if (!pf)
+               return;
+       mutex_lock(&i40e_client_instance_mutex);
+       list_for_each_entry(cdev, &i40e_client_instances, list) {
+               if (cdev->lan_info.pf == pf) {
+                       if (!cdev->client ||
+                           !cdev->client->ops ||
+                           !cdev->client->ops->vf_reset) {
+                               dev_dbg(&pf->pdev->dev,
+                                       "Cannot locate client instance VF reset routine\n");
+                               continue;
+                       }
+                       cdev->client->ops->vf_reset(&cdev->lan_info,
+                                                   cdev->client, vf_id);
+               }
+       }
+       mutex_unlock(&i40e_client_instance_mutex);
+}
+
+/**
+ * i40e_notify_client_of_vf_enable - call the client vf notification callback
+ * @pf: PF device pointer
+ * @num_vfs: the number of VFs currently enabled, 0 for disable
+ *
+ * If there is a client attached to this PF, call its VF notification routine
+ **/
+void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs)
+{
+       struct i40e_client_instance *cdev;
+
+       if (!pf)
+               return;
+       mutex_lock(&i40e_client_instance_mutex);
+       list_for_each_entry(cdev, &i40e_client_instances, list) {
+               if (cdev->lan_info.pf == pf) {
+                       if (!cdev->client ||
+                           !cdev->client->ops ||
+                           !cdev->client->ops->vf_enable) {
+                               dev_dbg(&pf->pdev->dev,
+                                       "Cannot locate client instance VF enable routine\n");
+                               continue;
+                       }
+                       cdev->client->ops->vf_enable(&cdev->lan_info,
+                                                    cdev->client, num_vfs);
+               }
+       }
+       mutex_unlock(&i40e_client_instance_mutex);
+}
+
+/**
+ * i40e_vf_client_capable - check whether the client supports the specified VF
+ * @pf: PF device pointer
+ * @vf_id: the VF in question
+ * @type: the client type to query
+ *
+ * If there is a client of the specified type attached to this PF, call
+ * its vf_capable routine.
+ **/
+int i40e_vf_client_capable(struct i40e_pf *pf, u32 vf_id,
+                          enum i40e_client_type type)
+{
+       struct i40e_client_instance *cdev;
+       int capable = false;
+
+       if (!pf)
+               return false;
+       mutex_lock(&i40e_client_instance_mutex);
+       list_for_each_entry(cdev, &i40e_client_instances, list) {
+               if (cdev->lan_info.pf == pf) {
+                       if (!cdev->client ||
+                           !cdev->client->ops ||
+                           !cdev->client->ops->vf_capable ||
+                           !(cdev->client->type == type)) {
+                               dev_dbg(&pf->pdev->dev,
+                                       "Cannot locate client instance VF capability routine\n");
+                               continue;
+                       }
+                       capable = cdev->client->ops->vf_capable(&cdev->lan_info,
+                                                               cdev->client,
+                                                               vf_id);
+                       break;
+               }
+       }
+       mutex_unlock(&i40e_client_instance_mutex);
+       return capable;
+}
+
+/**
+ * i40e_vsi_lookup - finds a matching VSI from the PF list starting at start_vsi
+ * @pf: board private structure
+ * @type: vsi type
+ * @start_vsi: a VSI pointer from where to start the search
+ *
+ * Returns the first matching VSI on success or NULL if none is found
+ **/
+struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf,
+                                enum i40e_vsi_type type,
+                                struct i40e_vsi *start_vsi)
+{
+       struct i40e_vsi *vsi;
+       int i = 0;
+
+       if (start_vsi) {
+               for (i = 0; i < pf->num_alloc_vsi; i++) {
+                       vsi = pf->vsi[i];
+                       if (vsi == start_vsi)
+                               break;
+               }
+       }
+       for (; i < pf->num_alloc_vsi; i++) {
+               vsi = pf->vsi[i];
+               if (vsi && vsi->type == type)
+                       return vsi;
+       }
+
+       return NULL;
+}
+
+/**
+ * i40e_client_add_instance - add a client instance struct to the instance list
+ * @pf: pointer to the board struct
+ * @client: pointer to a client struct in the client list.
+ *
+ * Returns cdev ptr on success, NULL on failure
+ **/
+static
+struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf,
+                                                     struct i40e_client *client)
+{
+       struct i40e_client_instance *cdev;
+       struct netdev_hw_addr *mac = NULL;
+       struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+
+       mutex_lock(&i40e_client_instance_mutex);
+       list_for_each_entry(cdev, &i40e_client_instances, list) {
+               if ((cdev->lan_info.pf == pf) && (cdev->client == client)) {
+                       cdev = NULL;
+                       goto out;
+               }
+       }
+       cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+       if (!cdev)
+               goto out;
+
+       cdev->lan_info.pf = (void *)pf;
+       cdev->lan_info.netdev = vsi->netdev;
+       cdev->lan_info.pcidev = pf->pdev;
+       cdev->lan_info.fid = pf->hw.pf_id;
+       cdev->lan_info.ftype = I40E_CLIENT_FTYPE_PF;
+       cdev->lan_info.hw_addr = pf->hw.hw_addr;
+       cdev->lan_info.ops = &i40e_lan_ops;
+       cdev->lan_info.version.major = I40E_CLIENT_VERSION_MAJOR;
+       cdev->lan_info.version.minor = I40E_CLIENT_VERSION_MINOR;
+       cdev->lan_info.version.build = I40E_CLIENT_VERSION_BUILD;
+       cdev->lan_info.fw_maj_ver = pf->hw.aq.fw_maj_ver;
+       cdev->lan_info.fw_min_ver = pf->hw.aq.fw_min_ver;
+       cdev->lan_info.fw_build = pf->hw.aq.fw_build;
+       set_bit(__I40E_CLIENT_INSTANCE_NONE, &cdev->state);
+
+       if (i40e_client_get_params(vsi, &cdev->lan_info.params)) {
+               kfree(cdev);
+               cdev = NULL;
+               goto out;
+       }
+
+       cdev->lan_info.msix_count = pf->num_iwarp_msix;
+       cdev->lan_info.msix_entries = &pf->msix_entries[pf->iwarp_base_vector];
+
+       mac = list_first_entry(&cdev->lan_info.netdev->dev_addrs.list,
+                              struct netdev_hw_addr, list);
+       if (mac)
+               ether_addr_copy(cdev->lan_info.lanmac, mac->addr);
+       else
+               dev_err(&pf->pdev->dev, "MAC address list is empty!\n");
+
+       cdev->client = client;
+       INIT_LIST_HEAD(&cdev->list);
+       list_add(&cdev->list, &i40e_client_instances);
+out:
+       mutex_unlock(&i40e_client_instance_mutex);
+       return cdev;
+}
+
+/**
+ * i40e_client_del_instance - removes a client instance from the list
+ * @pf: pointer to the board struct
+ * @client: pointer to the registered client
+ *
+ * Returns 0 on success or non-0 on error
+ **/
+static
+int i40e_client_del_instance(struct i40e_pf *pf, struct i40e_client *client)
+{
+       struct i40e_client_instance *cdev, *tmp;
+       int ret = -ENODEV;
+
+       mutex_lock(&i40e_client_instance_mutex);
+       list_for_each_entry_safe(cdev, tmp, &i40e_client_instances, list) {
+               if ((cdev->lan_info.pf != pf) || (cdev->client != client))
+                       continue;
+
+               dev_info(&pf->pdev->dev, "Deleted instance of Client %s, of dev %d bus=0x%02x func=0x%02x)\n",
+                        client->name, pf->hw.pf_id,
+                        pf->hw.bus.device, pf->hw.bus.func);
+               list_del(&cdev->list);
+               kfree(cdev);
+               ret = 0;
+               break;
+       }
+       mutex_unlock(&i40e_client_instance_mutex);
+       return ret;
+}
+
+/**
+ * i40e_client_subtask - client maintenance work
+ * @pf: board private structure
+ **/
+void i40e_client_subtask(struct i40e_pf *pf)
+{
+       struct i40e_client_instance *cdev;
+       struct i40e_client *client;
+       int ret = 0;
+
+       if (!(pf->flags & I40E_FLAG_SERVICE_CLIENT_REQUESTED))
+               return;
+       pf->flags &= ~I40E_FLAG_SERVICE_CLIENT_REQUESTED;
+
+       /* If we're down or resetting, just bail */
+       if (test_bit(__I40E_DOWN, &pf->state) ||
+           test_bit(__I40E_CONFIG_BUSY, &pf->state))
+               return;
+
+       /* Check client state and instantiate client if client registered */
+       mutex_lock(&i40e_client_mutex);
+       list_for_each_entry(client, &i40e_clients, list) {
+               /* first check client is registered */
+               if (!test_bit(__I40E_CLIENT_REGISTERED, &client->state))
+                       continue;
+
+               /* Unless the client launches on probe, the LAN VSI must be up before creating an instance */
+               if (!(client->flags & I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE)) {
+                       /* check if L2 VSI is up, if not we are not ready */
+                       if (test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
+                               continue;
+               }
+
+               /* Add the client instance to the instance list */
+               cdev = i40e_client_add_instance(pf, client);
+               if (!cdev)
+                       continue;
+
+               /* Also bump the ref_cnt tracking the number of instances of this client */
+               atomic_inc(&client->ref_cnt);
+               dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x func=0x%02x\n",
+                        client->name, pf->hw.pf_id,
+                        pf->hw.bus.device, pf->hw.bus.func);
+
+               /* Send an Open request to the client */
+               atomic_inc(&cdev->ref_cnt);
+               if (client->ops && client->ops->open)
+                       ret = client->ops->open(&cdev->lan_info, client);
+               atomic_dec(&cdev->ref_cnt);
+               if (!ret) {
+                       set_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
+               } else {
+                       /* remove client instance */
+                       i40e_client_del_instance(pf, client);
+                       atomic_dec(&client->ref_cnt);
+                       continue;
+               }
+       }
+       mutex_unlock(&i40e_client_mutex);
+}
+
+/**
+ * i40e_lan_add_device - add a lan device struct to the list of lan devices
+ * @pf: pointer to the board struct
+ *
+ * Returns 0 on success or non-0 on error
+ **/
+int i40e_lan_add_device(struct i40e_pf *pf)
+{
+       struct i40e_device *ldev;
+       int ret = 0;
+
+       mutex_lock(&i40e_device_mutex);
+       list_for_each_entry(ldev, &i40e_devices, list) {
+               if (ldev->pf == pf) {
+                       ret = -EEXIST;
+                       goto out;
+               }
+       }
+       ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
+       if (!ldev) {
+               ret = -ENOMEM;
+               goto out;
+       }
+       ldev->pf = pf;
+       INIT_LIST_HEAD(&ldev->list);
+       list_add(&ldev->list, &i40e_devices);
+       dev_info(&pf->pdev->dev, "Added LAN device PF%d bus=0x%02x func=0x%02x\n",
+                pf->hw.pf_id, pf->hw.bus.device, pf->hw.bus.func);
+
+       /* Since a client may have registered before this device was added,
+        * schedule a subtask to go initialize the clients.
+        */
+       pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
+       i40e_service_event_schedule(pf);
+
+out:
+       mutex_unlock(&i40e_device_mutex);
+       return ret;
+}
+
+/**
+ * i40e_lan_del_device - removes a lan device from the device list
+ * @pf: pointer to the board struct
+ *
+ * Returns 0 on success or non-0 on error
+ **/
+int i40e_lan_del_device(struct i40e_pf *pf)
+{
+       struct i40e_device *ldev, *tmp;
+       int ret = -ENODEV;
+
+       mutex_lock(&i40e_device_mutex);
+       list_for_each_entry_safe(ldev, tmp, &i40e_devices, list) {
+               if (ldev->pf == pf) {
+                       dev_info(&pf->pdev->dev, "Deleted LAN device PF%d bus=0x%02x func=0x%02x\n",
+                                pf->hw.pf_id, pf->hw.bus.device,
+                                pf->hw.bus.func);
+                       list_del(&ldev->list);
+                       kfree(ldev);
+                       ret = 0;
+                       break;
+               }
+       }
+
+       mutex_unlock(&i40e_device_mutex);
+       return ret;
+}
+
+/**
+ * i40e_client_release - release client specific resources
+ * @client: pointer to the registered client
+ *
+ * Return 0 on success or < 0 on error
+ **/
+static int i40e_client_release(struct i40e_client *client)
+{
+       struct i40e_client_instance *cdev, *tmp;
+       struct i40e_pf *pf = NULL;
+       int ret = 0;
+
+       LIST_HEAD(cdevs_tmp);
+
+       mutex_lock(&i40e_client_instance_mutex);
+       list_for_each_entry_safe(cdev, tmp, &i40e_client_instances, list) {
+               if (strncmp(cdev->client->name, client->name,
+                           I40E_CLIENT_STR_LENGTH))
+                       continue;
+               pf = (struct i40e_pf *)cdev->lan_info.pf;
+               if (test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
+                       if (atomic_read(&cdev->ref_cnt) > 0) {
+                               ret = I40E_ERR_NOT_READY;
+                               goto out;
+                       }
+                       if (client->ops && client->ops->close)
+                               client->ops->close(&cdev->lan_info, client,
+                                                  false);
+                       i40e_client_release_qvlist(&cdev->lan_info);
+                       clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
+
+                       dev_warn(&pf->pdev->dev,
+                                "Client %s instance for PF id %d closed\n",
+                                client->name, pf->hw.pf_id);
+               }
+               /* delete the client instance from the list */
+               list_del(&cdev->list);
+               list_add(&cdev->list, &cdevs_tmp);
+               atomic_dec(&client->ref_cnt);
+               dev_info(&pf->pdev->dev, "Deleted client instance of Client %s\n",
+                        client->name);
+       }
+out:
+       mutex_unlock(&i40e_client_instance_mutex);
+
+       /* free the client instances that were moved off the list */
+       list_for_each_entry_safe(cdev, tmp, &cdevs_tmp, list) {
+               kfree(cdev);
+       }
+       return ret;
+}
+
+/**
+ * i40e_client_prepare - prepare client specific resources
+ * @client: pointer to the registered client
+ *
+ * Return 0 on success or < 0 on error
+ **/
+static int i40e_client_prepare(struct i40e_client *client)
+{
+       struct i40e_device *ldev;
+       struct i40e_pf *pf;
+       int ret = 0;
+
+       mutex_lock(&i40e_device_mutex);
+       list_for_each_entry(ldev, &i40e_devices, list) {
+               pf = ldev->pf;
+               /* Start the client subtask */
+               pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
+               i40e_service_event_schedule(pf);
+       }
+       mutex_unlock(&i40e_device_mutex);
+       return ret;
+}
+
+/**
+ * i40e_client_virtchnl_send - send a message to the specified VF over the virtual channel
+ * @ldev: pointer to L2 context
+ * @client: Client pointer
+ * @vf_id: absolute VF identifier
+ * @msg: message buffer
+ * @len: length of message buffer
+ *
+ * Return 0 on success or < 0 on error
+ **/
+static int i40e_client_virtchnl_send(struct i40e_info *ldev,
+                                    struct i40e_client *client,
+                                    u32 vf_id, u8 *msg, u16 len)
+{
+       struct i40e_pf *pf = ldev->pf;
+       struct i40e_hw *hw = &pf->hw;
+       i40e_status err;
+
+       err = i40e_aq_send_msg_to_vf(hw, vf_id, I40E_VIRTCHNL_OP_IWARP,
+                                    0, msg, len, NULL);
+       if (err)
+               dev_err(&pf->pdev->dev, "Unable to send iWarp message to VF, error %d, aq status %d\n",
+                       err, hw->aq.asq_last_status);
+
+       return err;
+}
+
+/**
+ * i40e_client_setup_qvlist
+ * @ldev: pointer to L2 context.
+ * @client: Client pointer.
+ * @qvlist_info: queue and vector list
+ *
+ * Return 0 on success or < 0 on error
+ **/
+static int i40e_client_setup_qvlist(struct i40e_info *ldev,
+                                   struct i40e_client *client,
+                                   struct i40e_qvlist_info *qvlist_info)
+{
+       struct i40e_pf *pf = ldev->pf;
+       struct i40e_hw *hw = &pf->hw;
+       struct i40e_qv_info *qv_info;
+       u32 v_idx, i, reg_idx, reg;
+       u32 size;
+
+       size = sizeof(struct i40e_qvlist_info) +
+              (sizeof(struct i40e_qv_info) * (qvlist_info->num_vectors - 1));
+       ldev->qvlist_info = kzalloc(size, GFP_KERNEL);
+       if (!ldev->qvlist_info)
+               return -ENOMEM;
+       ldev->qvlist_info->num_vectors = qvlist_info->num_vectors;
+
+       for (i = 0; i < qvlist_info->num_vectors; i++) {
+               qv_info = &qvlist_info->qv_info[i];
+               if (!qv_info)
+                       continue;
+               v_idx = qv_info->v_idx;
+
+               /* Validate vector id belongs to this client */
+               if ((v_idx >= (pf->iwarp_base_vector + pf->num_iwarp_msix)) ||
+                   (v_idx < pf->iwarp_base_vector))
+                       goto err;
+
+               ldev->qvlist_info->qv_info[i] = *qv_info;
+               reg_idx = I40E_PFINT_LNKLSTN(v_idx - 1);
+
+               if (qv_info->ceq_idx == I40E_QUEUE_INVALID_IDX) {
+                       /* Special case - No CEQ mapped on this vector */
+                       wr32(hw, reg_idx, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK);
+               } else {
+                       reg = (qv_info->ceq_idx &
+                              I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) |
+                              (I40E_QUEUE_TYPE_PE_CEQ <<
+                              I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
+                       wr32(hw, reg_idx, reg);
+
+                       reg = (I40E_PFINT_CEQCTL_CAUSE_ENA_MASK |
+                              (v_idx << I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT) |
+                              (qv_info->itr_idx <<
+                               I40E_PFINT_CEQCTL_ITR_INDX_SHIFT) |
+                              (I40E_QUEUE_END_OF_LIST <<
+                               I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT));
+                       wr32(hw, I40E_PFINT_CEQCTL(qv_info->ceq_idx), reg);
+               }
+               if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
+                       reg = (I40E_PFINT_AEQCTL_CAUSE_ENA_MASK |
+                              (v_idx << I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT) |
+                              (qv_info->itr_idx <<
+                               I40E_PFINT_AEQCTL_ITR_INDX_SHIFT));
+
+                       wr32(hw, I40E_PFINT_AEQCTL, reg);
+               }
+       }
+
+       return 0;
+err:
+       kfree(ldev->qvlist_info);
+       ldev->qvlist_info = NULL;
+       return -EINVAL;
+}
+
+/**
+ * i40e_client_request_reset
+ * @ldev: pointer to L2 context.
+ * @client: Client pointer.
+ * @reset_level: reset level requested by the client
+ **/
+static void i40e_client_request_reset(struct i40e_info *ldev,
+                                     struct i40e_client *client,
+                                     u32 reset_level)
+{
+       struct i40e_pf *pf = ldev->pf;
+
+       switch (reset_level) {
+       case I40E_CLIENT_RESET_LEVEL_PF:
+               set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+               break;
+       case I40E_CLIENT_RESET_LEVEL_CORE:
+               set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+               break;
+       default:
+               dev_warn(&pf->pdev->dev,
+                        "Client %s instance for PF id %d request an unsupported reset: %d.\n",
+                        client->name, pf->hw.pf_id, reset_level);
+               break;
+       }
+
+       i40e_service_event_schedule(pf);
+}
+
+/**
+ * i40e_client_update_vsi_ctxt
+ * @ldev: pointer to L2 context.
+ * @client: Client pointer.
+ * @is_vf: true if this request is for a VF
+ * @vf_id: the VF ID, valid only when is_vf is true
+ * @flag: any device level setting that needs to be done for PE
+ * @valid_flag: mask of the bits in @flag that are valid to change
+ *
+ * Return 0 on success or < 0 on error
+ **/
+static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
+                                      struct i40e_client *client,
+                                      bool is_vf, u32 vf_id,
+                                      u32 flag, u32 valid_flag)
+{
+       struct i40e_pf *pf = ldev->pf;
+       struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+       struct i40e_vsi_context ctxt;
+       bool update = true;
+       i40e_status err;
+
+       /* TODO: for now do not allow setting VF's VSI setting */
+       if (is_vf)
+               return -EINVAL;
+
+       ctxt.seid = pf->main_vsi_seid;
+       ctxt.pf_num = pf->hw.pf_id;
+       err = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
+       ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+       if (err) {
+               dev_info(&pf->pdev->dev,
+                        "couldn't get PF vsi config, err %s aq_err %s\n",
+                        i40e_stat_str(&pf->hw, err),
+                        i40e_aq_str(&pf->hw,
+                                    pf->hw.aq.asq_last_status));
+               return -ENOENT;
+       }
+
+       if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE) &&
+           (flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE)) {
+               ctxt.info.valid_sections =
+                       cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
+               ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
+       } else if ((valid_flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE) &&
+                 !(flag & I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE)) {
+               ctxt.info.valid_sections =
+                       cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
+               ctxt.info.queueing_opt_flags &= ~I40E_AQ_VSI_QUE_OPT_TCP_ENA;
+       } else {
+               update = false;
+               dev_warn(&pf->pdev->dev,
+                        "Client %s instance for PF id %d request an unsupported Config: %x.\n",
+                        client->name, pf->hw.pf_id, flag);
+       }
+
+       if (update) {
+               err = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
+               if (err) {
+                       dev_info(&pf->pdev->dev,
+                                "update VSI ctxt for PE failed, err %s aq_err %s\n",
+                                i40e_stat_str(&pf->hw, err),
+                                i40e_aq_str(&pf->hw,
+                                            pf->hw.aq.asq_last_status));
+               }
+       }
+       return err;
+}
+
+/**
+ * i40e_register_client - Register an i40e client driver with the L2 driver
+ * @client: pointer to the i40e_client struct
+ *
+ * Returns 0 on success or non-0 on error
+ **/
+int i40e_register_client(struct i40e_client *client)
+{
+       int ret = 0;
+       enum i40e_vsi_type vsi_type;
+
+       if (!client) {
+               ret = -EIO;
+               goto out;
+       }
+
+       if (strlen(client->name) == 0) {
+               pr_info("i40e: Failed to register client with no name\n");
+               ret = -EIO;
+               goto out;
+       }
+
+       mutex_lock(&i40e_client_mutex);
+       if (i40e_client_is_registered(client)) {
+               pr_info("i40e: Client %s has already been registered!\n",
+                       client->name);
+               mutex_unlock(&i40e_client_mutex);
+               ret = -EEXIST;
+               goto out;
+       }
+
+       if ((client->version.major != I40E_CLIENT_VERSION_MAJOR) ||
+           (client->version.minor != I40E_CLIENT_VERSION_MINOR)) {
+               pr_info("i40e: Failed to register client %s due to mismatched client interface version\n",
+                       client->name);
+               pr_info("Client is using version: %02d.%02d.%02d while LAN driver supports %s\n",
+                       client->version.major, client->version.minor,
+                       client->version.build,
+                       i40e_client_interface_version_str);
+               mutex_unlock(&i40e_client_mutex);
+               ret = -EIO;
+               goto out;
+       }
+
+       vsi_type = i40e_client_type_to_vsi_type(client->type);
+       if (vsi_type == I40E_VSI_TYPE_UNKNOWN) {
+               pr_info("i40e: Failed to register client %s due to unknown client type %d\n",
+                       client->name, client->type);
+               mutex_unlock(&i40e_client_mutex);
+               ret = -EIO;
+               goto out;
+       }
+       list_add(&client->list, &i40e_clients);
+       set_bit(__I40E_CLIENT_REGISTERED, &client->state);
+       mutex_unlock(&i40e_client_mutex);
+
+       if (i40e_client_prepare(client)) {
+               ret = -EIO;
+               goto out;
+       }
+
+       pr_info("i40e: Registered client %s with return code %d\n",
+               client->name, ret);
+out:
+       return ret;
+}
+EXPORT_SYMBOL(i40e_register_client);
+
+/**
+ * i40e_unregister_client - Unregister an i40e client driver from the L2 driver
+ * @client: pointer to the i40e_client struct
+ *
+ * Returns 0 on success or non-0 on error
+ **/
+int i40e_unregister_client(struct i40e_client *client)
+{
+       int ret = 0;
+
+       /* When an unregister request comes through, we have to send
+        * a close to each of the client instances that were opened.
+        * The client_release function is called to handle this.
+        */
+       if (!client || i40e_client_release(client)) {
+               ret = -EIO;
+               goto out;
+       }
+
+       /* TODO: check if device is in reset, or if that matters? */
+       mutex_lock(&i40e_client_mutex);
+       if (!i40e_client_is_registered(client)) {
+               pr_info("i40e: Client %s has not been registered\n",
+                       client->name);
+               mutex_unlock(&i40e_client_mutex);
+               ret = -ENODEV;
+               goto out;
+       }
+       if (atomic_read(&client->ref_cnt) == 0) {
+               clear_bit(__I40E_CLIENT_REGISTERED, &client->state);
+               list_del(&client->list);
+               pr_info("i40e: Unregistered client %s with return code %d\n",
+                       client->name, ret);
+       } else {
+               ret = I40E_ERR_NOT_READY;
+               pr_err("i40e: Client %s failed unregister - client has open instances\n",
+                      client->name);
+       }
+
+       mutex_unlock(&i40e_client_mutex);
+out:
+       return ret;
+}
+EXPORT_SYMBOL(i40e_unregister_client);
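
For orientation, here is a minimal sketch of how a client driver might consume the registration API above. The example_* names, the callback bodies and the module boilerplate are illustrative assumptions rather than part of this patch; the snippet presumes i40e_client.h plus the usual kernel headers.

/* Hypothetical client module; names and callback bodies are illustrative only. */
#include <linux/module.h>
#include <linux/kernel.h>
#include "i40e_client.h"

static int example_open(struct i40e_info *ldev, struct i40e_client *client)
{
        /* set up the resources described by ldev (MSI-X entries, MAC, MTU) */
        return 0;
}

static void example_close(struct i40e_info *ldev, struct i40e_client *client,
                          bool reset)
{
        /* quiesce the hardware; reset is true when a PF reset is pending */
}

static struct i40e_client_ops example_ops = {
        .open  = example_open,
        .close = example_close,
};

static struct i40e_client example_client = {
        .name          = "example",
        .version.major = I40E_CLIENT_VERSION_MAJOR,
        .version.minor = I40E_CLIENT_VERSION_MINOR,
        .version.build = I40E_CLIENT_VERSION_BUILD,
        .type          = I40E_CLIENT_IWARP,
        .ops           = &example_ops,
};

static int __init example_init(void)
{
        /* registration schedules the client subtask on every known PF */
        return i40e_register_client(&example_client);
}

static void __exit example_exit(void)
{
        i40e_unregister_client(&example_client);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
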
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.h b/drivers/net/ethernet/intel/i40e/i40e_client.h
new file mode 100644 (file)
index 0000000..bf6b453
--- /dev/null
@@ -0,0 +1,232 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_CLIENT_H_
+#define _I40E_CLIENT_H_
+
+#define I40E_CLIENT_STR_LENGTH 10
+
+/* Client interface version should be updated anytime there is a change in the
+ * existing APIs or data structures.
+ */
+#define I40E_CLIENT_VERSION_MAJOR 0
+#define I40E_CLIENT_VERSION_MINOR 01
+#define I40E_CLIENT_VERSION_BUILD 00
+#define I40E_CLIENT_VERSION_STR     \
+       XSTRINGIFY(I40E_CLIENT_VERSION_MAJOR) "." \
+       XSTRINGIFY(I40E_CLIENT_VERSION_MINOR) "." \
+       XSTRINGIFY(I40E_CLIENT_VERSION_BUILD)
+
+struct i40e_client_version {
+       u8 major;
+       u8 minor;
+       u8 build;
+       u8 rsvd;
+};
+
+enum i40e_client_state {
+       __I40E_CLIENT_NULL,
+       __I40E_CLIENT_REGISTERED
+};
+
+enum i40e_client_instance_state {
+       __I40E_CLIENT_INSTANCE_NONE,
+       __I40E_CLIENT_INSTANCE_OPENED,
+};
+
+enum i40e_client_type {
+       I40E_CLIENT_IWARP,
+       I40E_CLIENT_VMDQ2
+};
+
+struct i40e_ops;
+struct i40e_client;
+
+/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
+ * In order for us to keep the interface simple, SW will define a
+ * unique type value for AEQ.
+ */
+#define I40E_QUEUE_TYPE_PE_AEQ  0x80
+#define I40E_QUEUE_INVALID_IDX 0xFFFF
+
+struct i40e_qv_info {
+       u32 v_idx; /* msix_vector */
+       u16 ceq_idx;
+       u16 aeq_idx;
+       u8 itr_idx;
+};
+
+struct i40e_qvlist_info {
+       u32 num_vectors;
+       struct i40e_qv_info qv_info[1];
+};
+
+#define I40E_CLIENT_MSIX_ALL 0xFFFFFFFF
+
+/* set of LAN parameters useful for clients managed by LAN */
+
+/* Struct to hold per priority info */
+struct i40e_prio_qos_params {
+       u16 qs_handle; /* qs handle for prio */
+       u8 tc; /* TC mapped to prio */
+       u8 reserved;
+};
+
+#define I40E_CLIENT_MAX_USER_PRIORITY        8
+/* Struct to hold Client QoS */
+struct i40e_qos_params {
+       struct i40e_prio_qos_params prio_qos[I40E_CLIENT_MAX_USER_PRIORITY];
+};
+
+struct i40e_params {
+       struct i40e_qos_params qos;
+       u16 mtu;
+};
+
+/* Structure to hold Lan device info for a client device */
+struct i40e_info {
+       struct i40e_client_version version;
+       u8 lanmac[6];
+       struct net_device *netdev;
+       struct pci_dev *pcidev;
+       u8 __iomem *hw_addr;
+       u8 fid; /* function id, PF id or VF id */
+#define I40E_CLIENT_FTYPE_PF 0
+#define I40E_CLIENT_FTYPE_VF 1
+       u8 ftype; /* function type, PF or VF */
+       void *pf;
+
+       /* All L2 params that could change during the life span of the PF
+        * and need to be communicated to the client when they change
+        */
+       struct i40e_qvlist_info *qvlist_info;
+       struct i40e_params params;
+       struct i40e_ops *ops;
+
+       u16 msix_count;  /* number of MSI-X vectors */
+       /* Array below is dynamically allocated based on msix_count */
+       struct msix_entry *msix_entries;
+       u16 itr_index; /* Which ITR index the PE driver is supposed to use */
+       u16 fw_maj_ver;                 /* firmware major version */
+       u16 fw_min_ver;                 /* firmware minor version */
+       u32 fw_build;                   /* firmware build number */
+};
+
+#define I40E_CLIENT_RESET_LEVEL_PF   1
+#define I40E_CLIENT_RESET_LEVEL_CORE 2
+#define I40E_CLIENT_VSI_FLAG_TCP_PACKET_ENABLE  BIT(1)
+
+struct i40e_ops {
+       /* setup_qvlist enables queues with a particular vector */
+       int (*setup_qvlist)(struct i40e_info *ldev, struct i40e_client *client,
+                           struct i40e_qvlist_info *qv_info);
+
+       int (*virtchnl_send)(struct i40e_info *ldev, struct i40e_client *client,
+                            u32 vf_id, u8 *msg, u16 len);
+
+       /* If the PE Engine is unresponsive, the RDMA driver can request a reset.
+        * The level indicates the scope of the reset being requested.
+        */
+       void (*request_reset)(struct i40e_info *ldev,
+                             struct i40e_client *client, u32 level);
+
+       /* API for the RDMA driver to set certain VSI flags that control
+        * PE Engine.
+        */
+       int (*update_vsi_ctxt)(struct i40e_info *ldev,
+                              struct i40e_client *client,
+                              bool is_vf, u32 vf_id,
+                              u32 flag, u32 valid_flag);
+};
+
+struct i40e_client_ops {
+       /* Should be called from register_client() or whenever the PF is ready
+        * to create a specific client instance.
+        */
+       int (*open)(struct i40e_info *ldev, struct i40e_client *client);
+
+       /* Should be called when the netdev is unavailable or when an
+        * unregister call comes in. If the close is happening due to a
+        * reset being triggered, set the reset flag to true.
+        */
+       void (*close)(struct i40e_info *ldev, struct i40e_client *client,
+                     bool reset);
+
+       /* called when an L2-managed parameter changes, e.g. the MTU */
+       void (*l2_param_change)(struct i40e_info *ldev,
+                               struct i40e_client *client,
+                               struct i40e_params *params);
+
+       int (*virtchnl_receive)(struct i40e_info *ldev,
+                               struct i40e_client *client, u32 vf_id,
+                               u8 *msg, u16 len);
+
+       /* called when a VF is reset by the PF */
+       void (*vf_reset)(struct i40e_info *ldev,
+                        struct i40e_client *client, u32 vf_id);
+
+       /* called when the number of VFs changes */
+       void (*vf_enable)(struct i40e_info *ldev,
+                         struct i40e_client *client, u32 num_vfs);
+
+       /* returns true if VF is capable of specified offload */
+       int (*vf_capable)(struct i40e_info *ldev,
+                         struct i40e_client *client, u32 vf_id);
+};
+
+/* Client device */
+struct i40e_client_instance {
+       struct list_head list;
+       struct i40e_info lan_info;
+       struct i40e_client *client;
+       unsigned long  state;
+       /* A count of all the in-progress calls to the client */
+       atomic_t ref_cnt;
+};
+
+struct i40e_client {
+       struct list_head list;          /* list of registered clients */
+       char name[I40E_CLIENT_STR_LENGTH];
+       struct i40e_client_version version;
+       unsigned long state;            /* client state */
+       atomic_t ref_cnt;  /* Count of all the client devices of this kind */
+       u32 flags;
+#define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE      BIT(0)
+#define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS      BIT(2)
+       enum i40e_client_type type;
+       struct i40e_client_ops *ops;    /* client ops provided by the client */
+};
+
+static inline bool i40e_client_is_registered(struct i40e_client *client)
+{
+       return test_bit(__I40E_CLIENT_REGISTERED, &client->state);
+}
+
+/* used by clients */
+int i40e_register_client(struct i40e_client *client);
+int i40e_unregister_client(struct i40e_client *client);
+
+#endif /* _I40E_CLIENT_H_ */
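
To make the qv_info[1] sizing convention consumed by setup_qvlist concrete, here is a hedged client-side sketch; the example_setup_vectors helper, its vector choices and queue indices are assumptions for illustration only, and the snippet assumes <linux/slab.h> and this header are already included.

/* Illustrative only: allocate a two-vector qvlist and hand it to the LAN driver. */
static int example_setup_vectors(struct i40e_info *ldev, struct i40e_client *client)
{
        u32 n = 2;      /* assumes the LAN driver granted at least two MSI-X vectors */
        u32 size = sizeof(struct i40e_qvlist_info) +
                   (sizeof(struct i40e_qv_info) * (n - 1));
        struct i40e_qvlist_info *qvlist;
        int err;

        if (ldev->msix_count < n)
                return -EINVAL;

        qvlist = kzalloc(size, GFP_KERNEL);
        if (!qvlist)
                return -ENOMEM;

        qvlist->num_vectors = n;

        /* first vector handles CEQ 0, no AEQ */
        qvlist->qv_info[0].v_idx   = ldev->msix_entries[0].entry;
        qvlist->qv_info[0].ceq_idx = 0;
        qvlist->qv_info[0].aeq_idx = I40E_QUEUE_INVALID_IDX;
        qvlist->qv_info[0].itr_idx = 0;

        /* second vector handles the AEQ only */
        qvlist->qv_info[1].v_idx   = ldev->msix_entries[1].entry;
        qvlist->qv_info[1].ceq_idx = I40E_QUEUE_INVALID_IDX;
        qvlist->qv_info[1].aeq_idx = 0;
        qvlist->qv_info[1].itr_idx = 0;

        /* the LAN driver copies the list, so the local buffer can be freed */
        err = ldev->ops->setup_qvlist(ldev, client, qvlist);
        kfree(qvlist);
        return err;
}
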
index 8f3b53e0dc46c28965d00ea994940949404361be..1df2629d37059b88502fca4cae95f0c06bf3764d 100644 (file)
@@ -290,7 +290,7 @@ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
  *
  * If not already scheduled, this puts the task into the work queue
  **/
-static void i40e_service_event_schedule(struct i40e_pf *pf)
+void i40e_service_event_schedule(struct i40e_pf *pf)
 {
        if (!test_bit(__I40E_DOWN, &pf->state) &&
            !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
@@ -2212,7 +2212,7 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
        netdev->mtu = new_mtu;
        if (netif_running(netdev))
                i40e_vsi_reinit_locked(vsi);
-
+       i40e_notify_client_of_l2_param_changes(vsi);
        return 0;
 }
 
@@ -4166,6 +4166,9 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
                free_irq(pf->msix_entries[0].vector, pf);
        }
 
+       i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
+                     I40E_IWARP_IRQ_PILE_ID);
+
        i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
        for (i = 0; i < pf->num_alloc_vsi; i++)
                if (pf->vsi[i])
@@ -4209,12 +4212,17 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
  **/
 static void i40e_vsi_close(struct i40e_vsi *vsi)
 {
+       bool reset = false;
+
        if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
                i40e_down(vsi);
        i40e_vsi_free_irq(vsi);
        i40e_vsi_free_tx_resources(vsi);
        i40e_vsi_free_rx_resources(vsi);
        vsi->current_netdev_flags = 0;
+       if (test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
+               reset = true;
+       i40e_notify_client_of_netdev_close(vsi, reset);
 }
 
 /**
@@ -4831,6 +4839,12 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
        ctxt.info = vsi->info;
        i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
 
+       if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
+               ctxt.info.valid_sections |=
+                               cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
+               ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
+       }
+
        /* Update the VSI after updating the VSI queue-mapping information */
        ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
        if (ret) {
@@ -4974,6 +4988,7 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
                        if (pf->vsi[v]->netdev)
                                i40e_dcbnl_set_all(pf->vsi[v]);
                }
+               i40e_notify_client_of_l2_param_changes(pf->vsi[v]);
        }
 }
 
@@ -5173,6 +5188,11 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
                }
                i40e_fdir_filter_restore(vsi);
        }
+
+       /* On the next run of the service_task, notify any clients of the
+        * newly opened netdev
+        */
+       pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
        i40e_service_event_schedule(pf);
 
        return 0;
@@ -5351,6 +5371,8 @@ int i40e_open(struct net_device *netdev)
        geneve_get_rx_port(netdev);
 #endif
 
+       i40e_notify_client_of_netdev_open(vsi);
+
        return 0;
 }
 
@@ -6015,6 +6037,7 @@ static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
        case I40E_VSI_SRIOV:
        case I40E_VSI_VMDQ2:
        case I40E_VSI_CTRL:
+       case I40E_VSI_IWARP:
        case I40E_VSI_MIRROR:
        default:
                /* there is no notification for other VSIs */
@@ -7116,6 +7139,7 @@ static void i40e_service_task(struct work_struct *work)
        i40e_vc_process_vflr_event(pf);
        i40e_watchdog_subtask(pf);
        i40e_fdir_reinit_subtask(pf);
+       i40e_client_subtask(pf);
        i40e_sync_filters_subtask(pf);
        i40e_sync_udp_filters_subtask(pf);
        i40e_clean_adminq_subtask(pf);
@@ -7520,6 +7544,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
        int vectors_left;
        int v_budget, i;
        int v_actual;
+       int iwarp_requested = 0;
 
        if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
                return -ENODEV;
@@ -7533,6 +7558,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
         *              is governed by number of cpus in the system.
         *      - assumes symmetric Tx/Rx pairing
         *   - The number of VMDq pairs
+        *   - The CPU count within the NUMA node if iWARP is enabled
 #ifdef I40E_FCOE
         *   - The number of FCOE qps.
 #endif
@@ -7579,6 +7605,16 @@ static int i40e_init_msix(struct i40e_pf *pf)
        }
 
 #endif
+       /* can we reserve enough for iWARP? */
+       if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+               if (!vectors_left)
+                       pf->num_iwarp_msix = 0;
+               else if (vectors_left < pf->num_iwarp_msix)
+                       pf->num_iwarp_msix = 1;
+               v_budget += pf->num_iwarp_msix;
+               vectors_left -= pf->num_iwarp_msix;
+       }
+
        /* any vectors left over go for VMDq support */
        if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
                int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
@@ -7613,6 +7649,8 @@ static int i40e_init_msix(struct i40e_pf *pf)
                 * of these features based on the policy and at the end disable
                 * the features that did not get any vectors.
                 */
+               iwarp_requested = pf->num_iwarp_msix;
+               pf->num_iwarp_msix = 0;
 #ifdef I40E_FCOE
                pf->num_fcoe_qps = 0;
                pf->num_fcoe_msix = 0;
@@ -7651,17 +7689,33 @@ static int i40e_init_msix(struct i40e_pf *pf)
                        pf->num_lan_msix = 1;
                        break;
                case 3:
+                       if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+                               pf->num_lan_msix = 1;
+                               pf->num_iwarp_msix = 1;
+                       } else {
+                               pf->num_lan_msix = 2;
+                       }
 #ifdef I40E_FCOE
                        /* give one vector to FCoE */
                        if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
                                pf->num_lan_msix = 1;
                                pf->num_fcoe_msix = 1;
                        }
-#else
-                       pf->num_lan_msix = 2;
 #endif
                        break;
                default:
+                       if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+                               pf->num_iwarp_msix = min_t(int, (vec / 3),
+                                                iwarp_requested);
+                               pf->num_vmdq_vsis = min_t(int, (vec / 3),
+                                                 I40E_DEFAULT_NUM_VMDQ_VSI);
+                       } else {
+                               pf->num_vmdq_vsis = min_t(int, (vec / 2),
+                                                 I40E_DEFAULT_NUM_VMDQ_VSI);
+                       }
+                       pf->num_lan_msix = min_t(int,
+                              (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
+                                                             pf->num_lan_msix);
 #ifdef I40E_FCOE
                        /* give one vector to FCoE */
                        if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
@@ -7669,8 +7723,6 @@ static int i40e_init_msix(struct i40e_pf *pf)
                                vec--;
                        }
 #endif
-                       /* give the rest to the PF */
-                       pf->num_lan_msix = min_t(int, vec, pf->num_lan_qps);
                        break;
                }
        }
@@ -7680,6 +7732,12 @@ static int i40e_init_msix(struct i40e_pf *pf)
                dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
                pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
        }
+
+       if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
+           (pf->num_iwarp_msix == 0)) {
+               dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
+               pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
+       }
 #ifdef I40E_FCOE
 
        if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) {
@@ -7771,6 +7829,7 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
                vectors = i40e_init_msix(pf);
                if (vectors < 0) {
                        pf->flags &= ~(I40E_FLAG_MSIX_ENABLED   |
+                                      I40E_FLAG_IWARP_ENABLED  |
 #ifdef I40E_FCOE
                                       I40E_FLAG_FCOE_ENABLED   |
 #endif
@@ -8373,6 +8432,12 @@ static int i40e_sw_init(struct i40e_pf *pf)
                pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
        }
 
+       if (pf->hw.func_caps.iwarp) {
+               pf->flags |= I40E_FLAG_IWARP_ENABLED;
+               /* IWARP needs one extra vector for CQP just like MISC. */
+               pf->num_iwarp_msix = (int)num_online_cpus() + 1;
+       }
+
 #ifdef I40E_FCOE
        i40e_init_pf_fcoe(pf);
 
@@ -9216,6 +9281,13 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
                                cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
                }
 
+               if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
+                       ctxt.info.valid_sections |=
+                               cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
+                       ctxt.info.queueing_opt_flags |=
+                                               I40E_AQ_VSI_QUE_OPT_TCP_ENA;
+               }
+
                ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
                ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
                if (pf->vf[vsi->vf_id].spoofchk) {
@@ -9239,6 +9311,10 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
                break;
 
 #endif /* I40E_FCOE */
+       case I40E_VSI_IWARP:
+               /* send down message to iWARP */
+               break;
+
        default:
                return -ENODEV;
        }
@@ -10350,6 +10426,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
 
                /* make sure all the fancies are disabled */
                pf->flags &= ~(I40E_FLAG_RSS_ENABLED    |
+                              I40E_FLAG_IWARP_ENABLED  |
 #ifdef I40E_FCOE
                               I40E_FLAG_FCOE_ENABLED   |
 #endif
@@ -10367,6 +10444,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
                queues_left -= pf->num_lan_qps;
 
                pf->flags &= ~(I40E_FLAG_RSS_ENABLED    |
+                              I40E_FLAG_IWARP_ENABLED  |
 #ifdef I40E_FCOE
                               I40E_FLAG_FCOE_ENABLED   |
 #endif
@@ -10959,7 +11037,17 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 #endif /* CONFIG_PCI_IOV */
 
-       pfs_found++;
+       if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
+               pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
+                                                     pf->num_iwarp_msix,
+                                                     I40E_IWARP_IRQ_PILE_ID);
+               if (pf->iwarp_base_vector < 0) {
+                       dev_info(&pdev->dev,
+                                "failed to get tracking for %d vectors for IWARP err=%d\n",
+                                pf->num_iwarp_msix, pf->iwarp_base_vector);
+                       pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
+               }
+       }
 
        i40e_dbg_pf_init(pf);
 
@@ -10970,6 +11058,12 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        mod_timer(&pf->service_timer,
                  round_jiffies(jiffies + pf->service_timer_period));
 
+       /* add this PF to client device list and launch a client service task */
+       err = i40e_lan_add_device(pf);
+       if (err)
+               dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
+                        err);
+
 #ifdef I40E_FCOE
        /* create FCoE interface */
        i40e_fcoe_vsi_setup(pf);
@@ -11140,6 +11234,13 @@ static void i40e_remove(struct pci_dev *pdev)
        if (pf->vsi[pf->lan_vsi])
                i40e_vsi_release(pf->vsi[pf->lan_vsi]);
 
+       /* remove attached clients */
+       ret_code = i40e_lan_del_device(pf);
+       if (ret_code) {
+               dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
+                        ret_code);
+       }
+
        /* shutdown and destroy the HMC */
        if (pf->hw.hmc.hmc_obj) {
                ret_code = i40e_shutdown_lan_hmc(&pf->hw);
index dd2da356d9a1bb6628fcd0825f400350fc037e6f..79e975d29a1e0b70eb4989c8f773da9590b8003c 100644 (file)
@@ -78,7 +78,7 @@ enum i40e_debug_mask {
        I40E_DEBUG_DCB                  = 0x00000400,
        I40E_DEBUG_DIAG                 = 0x00000800,
        I40E_DEBUG_FD                   = 0x00001000,
-
+       I40E_DEBUG_IWARP                = 0x00F00000,
        I40E_DEBUG_AQ_MESSAGE           = 0x01000000,
        I40E_DEBUG_AQ_DESCRIPTOR        = 0x02000000,
        I40E_DEBUG_AQ_DESC_BUFFER       = 0x04000000,
@@ -144,6 +144,7 @@ enum i40e_vsi_type {
        I40E_VSI_MIRROR = 5,
        I40E_VSI_SRIOV  = 6,
        I40E_VSI_FDIR   = 7,
+       I40E_VSI_IWARP  = 8,
        I40E_VSI_TYPE_UNKNOWN
 };
 
index 3226946bf3d44076daacaf18fc6ad71158a57fca..ab866cf3dc18822b9d4bfcfe1362620d3eadaff5 100644 (file)
@@ -81,6 +81,9 @@ enum i40e_virtchnl_ops {
        I40E_VIRTCHNL_OP_GET_STATS = 15,
        I40E_VIRTCHNL_OP_FCOE = 16,
        I40E_VIRTCHNL_OP_EVENT = 17,
+       I40E_VIRTCHNL_OP_IWARP = 20,
+       I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21,
+       I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22,
 };
 
 /* Virtual channel message descriptor. This overlays the admin queue
@@ -348,6 +351,37 @@ struct i40e_virtchnl_pf_event {
        int severity;
 };
 
+/* I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP
+ * VF uses this message to request PF to map IWARP vectors to IWARP queues.
+ * The request for this originates from the VF IWARP driver through
+ * a client interface between VF LAN and VF IWARP driver.
+ * A vector could have both an AEQ and a CEQ attached to it. However, since
+ * there is a single AEQ per VF IWARP instance, most vectors will have an
+ * INVALID_IDX for the AEQ and a valid index for the CEQ.
+ * There will never be a case where there will be multiple CEQs attached
+ * to a single vector.
+ * PF configures interrupt mapping and returns status.
+ */
+
+/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
+ * In order for us to keep the interface simple, SW will define a
+ * unique type value for AEQ.
+ */
+#define I40E_QUEUE_TYPE_PE_AEQ  0x80
+#define I40E_QUEUE_INVALID_IDX  0xFFFF
+
+struct i40e_virtchnl_iwarp_qv_info {
+       u32 v_idx; /* msix_vector */
+       u16 ceq_idx;
+       u16 aeq_idx;
+       u8 itr_idx;
+};
+
+struct i40e_virtchnl_iwarp_qvlist_info {
+       u32 num_vectors;
+       struct i40e_virtchnl_iwarp_qv_info qv_info[1];
+};
+
 /* VF reset states - these are written into the RSTAT register:
  * I40E_VFGEN_RSTAT1 on the PF
  * I40E_VFGEN_RSTAT on the VF
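
Because the CONFIG_IWARP_IRQ_MAP payload above is variable-length, the sender has to size it the same way the PF's validator does. A hedged sketch follows; the helper name and buffer handling are illustrative assumptions, not part of this patch.

/* Illustrative only: build a single-vector qvlist payload for
 * I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP.  For num_vectors == 1 the
 * declared qv_info[1] member already covers the whole payload.
 */
static struct i40e_virtchnl_iwarp_qvlist_info *
example_build_qvlist(u32 v_idx, u16 ceq_idx, u16 *lenp)
{
        struct i40e_virtchnl_iwarp_qvlist_info *qvlist;
        u16 len = sizeof(*qvlist);

        qvlist = kzalloc(len, GFP_KERNEL);
        if (!qvlist)
                return NULL;

        qvlist->num_vectors = 1;
        qvlist->qv_info[0].v_idx   = v_idx;
        qvlist->qv_info[0].ceq_idx = ceq_idx;
        qvlist->qv_info[0].aeq_idx = I40E_QUEUE_INVALID_IDX;
        qvlist->qv_info[0].itr_idx = 0;

        *lenp = len;    /* matches the PF's valid_len check for one vector */
        return qvlist;
}
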
index 63e62f9aec6ef45e73e08970d5fca6152ac102c3..bf35b64f6a4a0094b5c7edec399909898acd86f4 100644 (file)
@@ -351,6 +351,136 @@ irq_list_done:
        i40e_flush(hw);
 }
 
+/**
+ * i40e_release_iwarp_qvlist
+ * @vf: pointer to the VF.
+ *
+ * Release the IWARP queue to vector mappings previously configured for this VF.
+ **/
+static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
+{
+       struct i40e_pf *pf = vf->pf;
+       struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
+       u32 msix_vf;
+       u32 i;
+
+       if (!vf->qvlist_info)
+               return;
+
+       msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
+       for (i = 0; i < qvlist_info->num_vectors; i++) {
+               struct i40e_virtchnl_iwarp_qv_info *qv_info;
+               u32 next_q_index, next_q_type;
+               struct i40e_hw *hw = &pf->hw;
+               u32 v_idx, reg_idx, reg;
+
+               qv_info = &qvlist_info->qv_info[i];
+               if (!qv_info)
+                       continue;
+               v_idx = qv_info->v_idx;
+               if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
+                       /* Figure out the queue after CEQ and make that the
+                        * first queue.
+                        */
+                       reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
+                       reg = rd32(hw, I40E_VPINT_CEQCTL(reg_idx));
+                       next_q_index = (reg & I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK)
+                                       >> I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT;
+                       next_q_type = (reg & I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK)
+                                       >> I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT;
+
+                       reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
+                       reg = (next_q_index &
+                              I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
+                              (next_q_type <<
+                              I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
+
+                       wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
+               }
+       }
+       kfree(vf->qvlist_info);
+       vf->qvlist_info = NULL;
+}
+
+/**
+ * i40e_config_iwarp_qvlist
+ * @vf: pointer to the VF info
+ * @qvlist_info: queue and vector list
+ *
+ * Return 0 on success or < 0 on error
+ **/
+static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
+                                   struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info)
+{
+       struct i40e_pf *pf = vf->pf;
+       struct i40e_hw *hw = &pf->hw;
+       struct i40e_virtchnl_iwarp_qv_info *qv_info;
+       u32 v_idx, i, reg_idx, reg;
+       u32 next_q_idx, next_q_type;
+       u32 msix_vf, size;
+
+       size = sizeof(struct i40e_virtchnl_iwarp_qvlist_info) +
+              (sizeof(struct i40e_virtchnl_iwarp_qv_info) *
+                                               (qvlist_info->num_vectors - 1));
+       vf->qvlist_info = kzalloc(size, GFP_KERNEL);
+       if (!vf->qvlist_info)
+               return -ENOMEM;
+       vf->qvlist_info->num_vectors = qvlist_info->num_vectors;
+
+       msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
+       for (i = 0; i < qvlist_info->num_vectors; i++) {
+               qv_info = &qvlist_info->qv_info[i];
+               if (!qv_info)
+                       continue;
+               v_idx = qv_info->v_idx;
+
+               /* Validate vector id belongs to this vf */
+               if (!i40e_vc_isvalid_vector_id(vf, v_idx))
+                       goto err;
+
+               vf->qvlist_info->qv_info[i] = *qv_info;
+
+               reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
+               /* We might be sharing the interrupt, so get the first queue
+                * index and type, push it down the list by adding the new
+                * queue on top. Also link it with the new queue in CEQCTL.
+                */
+               reg = rd32(hw, I40E_VPINT_LNKLSTN(reg_idx));
+               next_q_idx = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) >>
+                               I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT);
+               next_q_type = ((reg & I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK) >>
+                               I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
+
+               if (qv_info->ceq_idx != I40E_QUEUE_INVALID_IDX) {
+                       reg_idx = (msix_vf - 1) * vf->vf_id + qv_info->ceq_idx;
+                       reg = (I40E_VPINT_CEQCTL_CAUSE_ENA_MASK |
+                       (v_idx << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT) |
+                       (qv_info->itr_idx << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT) |
+                       (next_q_type << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT) |
+                       (next_q_idx << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT));
+                       wr32(hw, I40E_VPINT_CEQCTL(reg_idx), reg);
+
+                       reg_idx = ((msix_vf - 1) * vf->vf_id) + (v_idx - 1);
+                       reg = (qv_info->ceq_idx &
+                              I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK) |
+                              (I40E_QUEUE_TYPE_PE_CEQ <<
+                              I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT);
+                       wr32(hw, I40E_VPINT_LNKLSTN(reg_idx), reg);
+               }
+
+               if (qv_info->aeq_idx != I40E_QUEUE_INVALID_IDX) {
+                       reg = (I40E_VPINT_AEQCTL_CAUSE_ENA_MASK |
+                       (v_idx << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT) |
+                       (qv_info->itr_idx << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT));
+
+                       wr32(hw, I40E_VPINT_AEQCTL(vf->vf_id), reg);
+               }
+       }
+
+       return 0;
+err:
+       kfree(vf->qvlist_info);
+       vf->qvlist_info = NULL;
+       return -EINVAL;
+}
+
 /**
  * i40e_config_vsi_tx_queue
  * @vf: pointer to the VF info
@@ -849,9 +979,11 @@ complete_reset:
        /* reallocate VF resources to reset the VSI state */
        i40e_free_vf_res(vf);
        if (!i40e_alloc_vf_res(vf)) {
+               int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
                i40e_enable_vf_mappings(vf);
                set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
                clear_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
+               i40e_notify_client_of_vf_reset(pf, abs_vf_id);
        }
        /* tell the VF the reset is done */
        wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
@@ -876,11 +1008,7 @@ void i40e_free_vfs(struct i40e_pf *pf)
        while (test_and_set_bit(__I40E_VF_DISABLE, &pf->state))
                usleep_range(1000, 2000);
 
-       for (i = 0; i < pf->num_alloc_vfs; i++)
-               if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
-                       i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx],
-                                              false);
-
+       i40e_notify_client_of_vf_enable(pf, 0);
        for (i = 0; i < pf->num_alloc_vfs; i++)
                if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states))
                        i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx],
@@ -952,6 +1080,7 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
                        goto err_iov;
                }
        }
+       i40e_notify_client_of_vf_enable(pf, num_alloc_vfs);
        /* allocate memory */
        vfs = kcalloc(num_alloc_vfs, sizeof(struct i40e_vf), GFP_KERNEL);
        if (!vfs) {
@@ -1205,6 +1334,13 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
        vsi = pf->vsi[vf->lan_vsi_idx];
        if (!vsi->info.pvid)
                vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
+
+       if (i40e_vf_client_capable(pf, vf->vf_id, I40E_CLIENT_IWARP) &&
+           (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_IWARP)) {
+               vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_IWARP;
+               set_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states);
+       }
+
        if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
                if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ)
                        vfres->vf_offload_flags |=
@@ -1813,6 +1949,72 @@ error_param:
        return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_DEL_VLAN, aq_ret);
 }
 
+/**
+ * i40e_vc_iwarp_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * called from the VF for the iwarp msgs
+ **/
+static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+       struct i40e_pf *pf = vf->pf;
+       int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
+       i40e_status aq_ret = 0;
+
+       if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+           !test_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states)) {
+               aq_ret = I40E_ERR_PARAM;
+               goto error_param;
+       }
+
+       i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
+                                    msg, msglen);
+
+error_param:
+       /* send the response to the VF */
+       return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_IWARP,
+                                      aq_ret);
+}
+
+/**
+ * i40e_vc_iwarp_qvmap_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @config: true to configure the queue vector map, false to release it
+ *
+ * called from the VF for the iwarp irq map msgs
+ **/
+static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, u16 msglen,
+                                  bool config)
+{
+       struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info =
+                               (struct i40e_virtchnl_iwarp_qvlist_info *)msg;
+       i40e_status aq_ret = 0;
+
+       if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+           !test_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states)) {
+               aq_ret = I40E_ERR_PARAM;
+               goto error_param;
+       }
+
+       if (config) {
+               if (i40e_config_iwarp_qvlist(vf, qvlist_info))
+                       aq_ret = I40E_ERR_PARAM;
+       } else {
+               i40e_release_iwarp_qvlist(vf);
+       }
+
+error_param:
+       /* send the response to the VF */
+       return i40e_vc_send_resp_to_vf(vf,
+                              config ? I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP :
+                              I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
+                              aq_ret);
+}
+
 /**
  * i40e_vc_validate_vf_msg
  * @vf: pointer to the VF info
@@ -1908,6 +2110,32 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
        case I40E_VIRTCHNL_OP_GET_STATS:
                valid_len = sizeof(struct i40e_virtchnl_queue_select);
                break;
+       case I40E_VIRTCHNL_OP_IWARP:
+               /* These messages are opaque to us and will be validated in
+                * the RDMA client code. We just need to check for nonzero
+                * length. The firmware will enforce max length restrictions.
+                */
+               if (msglen)
+                       valid_len = msglen;
+               else
+                       err_msg_format = true;
+               break;
+       case I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
+               valid_len = 0;
+               break;
+       case I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
+               valid_len = sizeof(struct i40e_virtchnl_iwarp_qvlist_info);
+               if (msglen >= valid_len) {
+                       struct i40e_virtchnl_iwarp_qvlist_info *qv =
+                               (struct i40e_virtchnl_iwarp_qvlist_info *)msg;
+                       if (qv->num_vectors == 0) {
+                               err_msg_format = true;
+                               break;
+                       }
+                       valid_len += ((qv->num_vectors - 1) *
+                               sizeof(struct i40e_virtchnl_iwarp_qv_info));
+               }
+               break;
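/* Editor's illustration, not part of this patch: how a VF would be expected to
 * size the variable-length CONFIG_IWARP_IRQ_MAP message so it passes the check
 * above, assuming i40e_virtchnl_iwarp_qvlist_info embeds one
 * i40e_virtchnl_iwarp_qv_info element (hence the "num_vectors - 1" term).
 */
static u16 example_iwarp_qvlist_msglen(u32 num_vectors)
{
        return sizeof(struct i40e_virtchnl_iwarp_qvlist_info) +
               (num_vectors - 1) *
               sizeof(struct i40e_virtchnl_iwarp_qv_info);
}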
        /* These are always errors coming from the VF. */
        case I40E_VIRTCHNL_OP_EVENT:
        case I40E_VIRTCHNL_OP_UNKNOWN:
@@ -1997,6 +2225,15 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
        case I40E_VIRTCHNL_OP_GET_STATS:
                ret = i40e_vc_get_stats_msg(vf, msg, msglen);
                break;
+       case I40E_VIRTCHNL_OP_IWARP:
+               ret = i40e_vc_iwarp_msg(vf, msg, msglen);
+               break;
+       case I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
+               ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, true);
+               break;
+       case I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
+               ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, false);
+               break;
        case I40E_VIRTCHNL_OP_UNKNOWN:
        default:
                dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
index da44995def42f3b034a1b08780a0c901335e02c7..1da4d9ac4c7ab2b7ec1082d78dcb849882b4c47f 100644 (file)
@@ -58,6 +58,7 @@ enum i40e_queue_ctrl {
 enum i40e_vf_states {
        I40E_VF_STAT_INIT = 0,
        I40E_VF_STAT_ACTIVE,
+       I40E_VF_STAT_IWARPENA,
        I40E_VF_STAT_FCOEENA,
        I40E_VF_STAT_DISABLED,
 };
@@ -66,6 +67,7 @@ enum i40e_vf_states {
 enum i40e_vf_capabilities {
        I40E_VIRTCHNL_VF_CAP_PRIVILEGE = 0,
        I40E_VIRTCHNL_VF_CAP_L2,
+       I40E_VIRTCHNL_VF_CAP_IWARP,
 };
 
 /* VF information structure */
@@ -106,6 +108,8 @@ struct i40e_vf {
        bool link_forced;
        bool link_up;           /* only valid if VF link is forced */
        bool spoofchk;
+       /* RDMA Client */
+       struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info;
 };
 
 void i40e_free_vfs(struct i40e_pf *pf);
index d66c690a859773d24db2cc835f8c365135023bf4..e97094598b2dcaf17a90e7d600eae6f6872ba21e 100644 (file)
@@ -157,7 +157,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
                [29] = "802.1ad offload support",
                [31] = "Modifying loopback source checks using UPDATE_QP support",
                [32] = "Loopback source checks support",
-               [33] = "RoCEv2 support"
+               [33] = "RoCEv2 support",
+               [34] = "DMFS Sniffer support (UC & MC)"
        };
        int i;
 
@@ -810,6 +811,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
        if (field & 0x80)
                dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN;
        dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f;
+       if (field & 0x20)
+               dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER;
        MLX4_GET(field, outbox, QUERY_DEV_CAP_PORT_BEACON_OFFSET);
        if (field & 0x80)
                dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PORT_BEACON;
index 1d4e2e054647ae3da57bfad23f4460f622ca85b0..42d8de892bfebd39213aa0fa15959f64799780f1 100644 (file)
@@ -752,8 +752,10 @@ static const u8 __promisc_mode[] = {
        [MLX4_FS_REGULAR]   = 0x0,
        [MLX4_FS_ALL_DEFAULT] = 0x1,
        [MLX4_FS_MC_DEFAULT] = 0x3,
-       [MLX4_FS_UC_SNIFFER] = 0x4,
-       [MLX4_FS_MC_SNIFFER] = 0x5,
+       [MLX4_FS_MIRROR_RX_PORT] = 0x4,
+       [MLX4_FS_MIRROR_SX_PORT] = 0x5,
+       [MLX4_FS_UC_SNIFFER] = 0x6,
+       [MLX4_FS_MC_SNIFFER] = 0x7,
 };
 
 int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev,
index 037fc4cdf5af675e811f5f50950816d8dfeaff51..ebb4036b98e5773343e25f4bd91a1b508b702613 100644 (file)
@@ -407,6 +407,12 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
 const char *mlx5_command_str(int command)
 {
        switch (command) {
+       case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
+               return "QUERY_HCA_VPORT_CONTEXT";
+
+       case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
+               return "MODIFY_HCA_VPORT_CONTEXT";
+
        case MLX5_CMD_OP_QUERY_HCA_CAP:
                return "QUERY_HCA_CAP";
 
index aac071a7e830b5fd777da7a5e4d014d802ad70d0..6ef0bfded2ba2f87fc4ad0ccdf54f8ca0556cbb5 100644 (file)
@@ -515,7 +515,7 @@ struct mlx5e_priv {
        struct mlx5_uar            cq_uar;
        u32                        pdn;
        u32                        tdn;
-       struct mlx5_core_mr        mr;
+       struct mlx5_core_mkey      mkey;
        struct mlx5e_rq            drop_rq;
 
        struct mlx5e_channel     **channel;
index d4e1c30452009718d9761a8a4949f5195d7f4c59..43a148939557b188d4fb9301d54be36342aa2688 100644 (file)
@@ -982,7 +982,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
        c->cpu      = cpu;
        c->pdev     = &priv->mdev->pdev->dev;
        c->netdev   = priv->netdev;
-       c->mkey_be  = cpu_to_be32(priv->mr.key);
+       c->mkey_be  = cpu_to_be32(priv->mkey.key);
        c->num_tc   = priv->params.num_tc;
 
        mlx5e_build_channeltc_to_txq_map(priv, ix);
@@ -2194,7 +2194,7 @@ static void mlx5e_build_netdev(struct net_device *netdev)
 }
 
 static int mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
-                            struct mlx5_core_mr *mr)
+                            struct mlx5_core_mkey *mkey)
 {
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_create_mkey_mbox_in *in;
@@ -2210,7 +2210,7 @@ static int mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
        in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
        in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
 
-       err = mlx5_core_create_mkey(mdev, mr, in, sizeof(*in), NULL, NULL,
+       err = mlx5_core_create_mkey(mdev, mkey, in, sizeof(*in), NULL, NULL,
                                    NULL);
 
        kvfree(in);
@@ -2259,7 +2259,7 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
                goto err_dealloc_pd;
        }
 
-       err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
+       err = mlx5e_create_mkey(priv, priv->pdn, &priv->mkey);
        if (err) {
                mlx5_core_err(mdev, "create mkey failed, %d\n", err);
                goto err_dealloc_transport_domain;
@@ -2333,7 +2333,7 @@ err_destroy_tises:
        mlx5e_destroy_tises(priv);
 
 err_destroy_mkey:
-       mlx5_core_destroy_mkey(mdev, &priv->mr);
+       mlx5_core_destroy_mkey(mdev, &priv->mkey);
 
 err_dealloc_transport_domain:
        mlx5_core_dealloc_transport_domain(mdev, priv->tdn);
@@ -2367,7 +2367,7 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
        mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);
        mlx5e_close_drop_rq(priv);
        mlx5e_destroy_tises(priv);
-       mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
+       mlx5_core_destroy_mkey(priv->mdev, &priv->mkey);
        mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn);
        mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
        mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
index 6f68dba8d7edcfb212e23461d20b8bd24a3d6723..bf3446794bd5f5d3eb05229d3e57fdb10ed3f046 100644 (file)
@@ -77,6 +77,9 @@
 #define KERNEL_NUM_PRIOS 1
 #define KENREL_MIN_LEVEL 2
 
+#define ANCHOR_MAX_FT 1
+#define ANCHOR_NUM_PRIOS 1
+#define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)
 struct node_caps {
        size_t  arr_sz;
        long    *caps;
@@ -92,7 +95,7 @@ static struct init_tree_node {
        int max_ft;
 } root_fs = {
        .type = FS_TYPE_NAMESPACE,
-       .ar_size = 3,
+       .ar_size = 4,
        .children = (struct init_tree_node[]) {
                ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
                         FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en),
@@ -108,6 +111,8 @@ static struct init_tree_node {
                                          FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode),
                                          FS_CAP(flow_table_properties_nic_receive.flow_table_modify)),
                         ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_MAX_FT))),
+               ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
+                        ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_MAX_FT))),
        }
 };
 
@@ -196,8 +201,10 @@ static void tree_put_node(struct fs_node *node)
 
 static int tree_remove_node(struct fs_node *node)
 {
-       if (atomic_read(&node->refcount) > 1)
-               return -EPERM;
+       if (atomic_read(&node->refcount) > 1) {
+               atomic_dec(&node->refcount);
+               return -EEXIST;
+       }
        tree_put_node(node);
        return 0;
 }
@@ -360,6 +367,11 @@ static void del_rule(struct fs_node *node)
        memcpy(match_value, fte->val, sizeof(fte->val));
        fs_get_obj(ft, fg->node.parent);
        list_del(&rule->node.list);
+       if (rule->sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
+               mutex_lock(&rule->dest_attr.ft->lock);
+               list_del(&rule->next_ft);
+               mutex_unlock(&rule->dest_attr.ft->lock);
+       }
        fte->dests_size--;
        if (fte->dests_size) {
                err = mlx5_cmd_update_fte(dev, ft,
@@ -465,6 +477,8 @@ static struct mlx5_flow_table *alloc_flow_table(int level, int max_fte,
        ft->node.type = FS_TYPE_FLOW_TABLE;
        ft->type = table_type;
        ft->max_fte = max_fte;
+       INIT_LIST_HEAD(&ft->fwd_rules);
+       mutex_init(&ft->lock);
 
        return ft;
 }
@@ -601,9 +615,63 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
        return err;
 }
 
+static int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
+                                       struct mlx5_flow_destination *dest)
+{
+       struct mlx5_flow_table *ft;
+       struct mlx5_flow_group *fg;
+       struct fs_fte *fte;
+       int err = 0;
+
+       fs_get_obj(fte, rule->node.parent);
+       if (!(fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
+               return -EINVAL;
+       lock_ref_node(&fte->node);
+       fs_get_obj(fg, fte->node.parent);
+       fs_get_obj(ft, fg->node.parent);
+
+       memcpy(&rule->dest_attr, dest, sizeof(*dest));
+       err = mlx5_cmd_update_fte(get_dev(&ft->node),
+                                 ft, fg->id, fte);
+       unlock_ref_node(&fte->node);
+
+       return err;
+}
+
+/* Modify/set FWD rules that point to old_next_ft so they point to new_next_ft */
+static int connect_fwd_rules(struct mlx5_core_dev *dev,
+                            struct mlx5_flow_table *new_next_ft,
+                            struct mlx5_flow_table *old_next_ft)
+{
+       struct mlx5_flow_destination dest;
+       struct mlx5_flow_rule *iter;
+       int err = 0;
+
+       /* new_next_ft and old_next_ft could be NULL only
+        * when we create/destroy the anchor flow table.
+        */
+       if (!new_next_ft || !old_next_ft)
+               return 0;
+
+       dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+       dest.ft = new_next_ft;
+
+       mutex_lock(&old_next_ft->lock);
+       list_splice_init(&old_next_ft->fwd_rules, &new_next_ft->fwd_rules);
+       mutex_unlock(&old_next_ft->lock);
+       list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) {
+               err = mlx5_modify_rule_destination(iter, &dest);
+               if (err)
+                       pr_err("mlx5_core: failed to modify rule to point to flow table %d\n",
+                              new_next_ft->id);
+       }
+       return 0;
+}
+
 static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
                              struct fs_prio *prio)
 {
+       struct mlx5_flow_table *next_ft;
        int err = 0;
 
        /* Connect_prev_fts and update_root_ft_create are mutually exclusive */
@@ -612,6 +680,11 @@ static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table
                err = connect_prev_fts(dev, ft, prio);
                if (err)
                        return err;
+
+               next_ft = find_next_chained_ft(prio);
+               err = connect_fwd_rules(dev, ft, next_ft);
+               if (err)
+                       return err;
        }
 
        if (MLX5_CAP_FLOWTABLE(dev,
@@ -762,6 +835,7 @@ static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
        if (!rule)
                return NULL;
 
+       INIT_LIST_HEAD(&rule->next_ft);
        rule->node.type = FS_TYPE_FLOW_DEST;
        memcpy(&rule->dest_attr, dest, sizeof(*dest));
 
@@ -782,9 +856,14 @@ static struct mlx5_flow_rule *add_rule_fte(struct fs_fte *fte,
                return ERR_PTR(-ENOMEM);
 
        fs_get_obj(ft, fg->node.parent);
-       /* Add dest to dests list- added as first element after the head */
+       /* Add dest to the dests list - flow table destinations must be at the
+        * end of the list for forward-to-next-prio rules.
+        */
        tree_init_node(&rule->node, 1, del_rule);
-       list_add_tail(&rule->node.list, &fte->node.children);
+       if (dest && dest->type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
+               list_add(&rule->node.list, &fte->node.children);
+       else
+               list_add_tail(&rule->node.list, &fte->node.children);
        fte->dests_size++;
        if (fte->dests_size == 1)
                err = mlx5_cmd_create_fte(get_dev(&ft->node),
@@ -903,6 +982,25 @@ out:
        return fg;
 }
 
+static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
+                                            struct mlx5_flow_destination *dest)
+{
+       struct mlx5_flow_rule *rule;
+
+       list_for_each_entry(rule, &fte->node.children, node.list) {
+               if (rule->dest_attr.type == dest->type) {
+                       if ((dest->type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
+                            dest->vport_num == rule->dest_attr.vport_num) ||
+                           (dest->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
+                            dest->ft == rule->dest_attr.ft) ||
+                           (dest->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
+                            dest->tir_num == rule->dest_attr.tir_num))
+                               return rule;
+               }
+       }
+       return NULL;
+}
+
 static struct mlx5_flow_rule *add_rule_fg(struct mlx5_flow_group *fg,
                                          u32 *match_value,
                                          u8 action,
@@ -919,6 +1017,13 @@ static struct mlx5_flow_rule *add_rule_fg(struct mlx5_flow_group *fg,
                nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD);
                if (compare_match_value(&fg->mask, match_value, &fte->val) &&
                    action == fte->action && flow_tag == fte->flow_tag) {
+                       rule = find_flow_rule(fte, dest);
+                       if (rule) {
+                               atomic_inc(&rule->node.refcount);
+                               unlock_ref_node(&fte->node);
+                               unlock_ref_node(&fg->node);
+                               return rule;
+                       }
                        rule = add_rule_fte(fte, fg, dest);
                        unlock_ref_node(&fte->node);
                        if (IS_ERR(rule))
@@ -984,14 +1089,14 @@ static struct mlx5_flow_rule *add_rule_to_auto_fg(struct mlx5_flow_table *ft,
        return rule;
 }
 
-struct mlx5_flow_rule *
-mlx5_add_flow_rule(struct mlx5_flow_table *ft,
-                  u8 match_criteria_enable,
-                  u32 *match_criteria,
-                  u32 *match_value,
-                  u32 action,
-                  u32 flow_tag,
-                  struct mlx5_flow_destination *dest)
+static struct mlx5_flow_rule *
+_mlx5_add_flow_rule(struct mlx5_flow_table *ft,
+                   u8 match_criteria_enable,
+                   u32 *match_criteria,
+                   u32 *match_value,
+                   u32 action,
+                   u32 flow_tag,
+                   struct mlx5_flow_destination *dest)
 {
        struct mlx5_flow_group *g;
        struct mlx5_flow_rule *rule;
@@ -1014,6 +1119,63 @@ unlock:
        unlock_ref_node(&ft->node);
        return rule;
 }
+
+static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
+{
+       return ((ft->type == FS_FT_NIC_RX) &&
+               (MLX5_CAP_FLOWTABLE(get_dev(&ft->node), nic_rx_multi_path_tirs)));
+}
+
+struct mlx5_flow_rule *
+mlx5_add_flow_rule(struct mlx5_flow_table *ft,
+                  u8 match_criteria_enable,
+                  u32 *match_criteria,
+                  u32 *match_value,
+                  u32 action,
+                  u32 flow_tag,
+                  struct mlx5_flow_destination *dest)
+{
+       struct mlx5_flow_root_namespace *root = find_root(&ft->node);
+       struct mlx5_flow_destination gen_dest;
+       struct mlx5_flow_table *next_ft = NULL;
+       struct mlx5_flow_rule *rule = NULL;
+       u32 sw_action = action;
+       struct fs_prio *prio;
+
+       fs_get_obj(prio, ft->node.parent);
+       if (action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
+               if (!fwd_next_prio_supported(ft))
+                       return ERR_PTR(-EOPNOTSUPP);
+               if (dest)
+                       return ERR_PTR(-EINVAL);
+               mutex_lock(&root->chain_lock);
+               next_ft = find_next_chained_ft(prio);
+               if (next_ft) {
+                       gen_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+                       gen_dest.ft = next_ft;
+                       dest = &gen_dest;
+                       action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+               } else {
+                       mutex_unlock(&root->chain_lock);
+                       return ERR_PTR(-EOPNOTSUPP);
+               }
+       }
+
+       rule =  _mlx5_add_flow_rule(ft, match_criteria_enable, match_criteria,
+                                   match_value, action, flow_tag, dest);
+
+       if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
+               if (!IS_ERR_OR_NULL(rule) &&
+                   (list_empty(&rule->next_ft))) {
+                       mutex_lock(&next_ft->lock);
+                       list_add(&rule->next_ft, &next_ft->fwd_rules);
+                       mutex_unlock(&next_ft->lock);
+                       rule->sw_action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
+               }
+               mutex_unlock(&root->chain_lock);
+       }
+       return rule;
+}
 EXPORT_SYMBOL(mlx5_add_flow_rule);
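/* Editor's illustration, not part of this patch: a minimal sketch of a caller
 * using the new forward-to-next-priority action; "ft" is assumed to sit in a
 * bypass priority and the match buffers are left empty. No destination may be
 * passed with this action - the core picks the next chained table itself.
 */
static int example_fwd_next_prio(struct mlx5_flow_table *ft)
{
        u32 match_c[MLX5_ST_SZ_DW(fte_match_param)] = {};
        u32 match_v[MLX5_ST_SZ_DW(fte_match_param)] = {};
        struct mlx5_flow_rule *rule;

        rule = mlx5_add_flow_rule(ft, 0, match_c, match_v,
                                  MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO,
                                  MLX5_FS_DEFAULT_FLOW_TAG, NULL);
        if (IS_ERR(rule))
                return PTR_ERR(rule); /* -EOPNOTSUPP w/o nic_rx_multi_path_tirs */
        mlx5_del_flow_rule(rule);
        return 0;
}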
 
 void mlx5_del_flow_rule(struct mlx5_flow_rule *rule)
@@ -1077,6 +1239,10 @@ static int disconnect_flow_table(struct mlx5_flow_table *ft)
                return 0;
 
        next_ft = find_next_chained_ft(prio);
+       err = connect_fwd_rules(dev, next_ft, ft);
+       if (err)
+               return err;
+
        err = connect_prev_fts(dev, next_ft, prio);
        if (err)
                mlx5_core_warn(dev, "Failed to disconnect flow table %d\n",
@@ -1126,6 +1292,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
        case MLX5_FLOW_NAMESPACE_BYPASS:
        case MLX5_FLOW_NAMESPACE_KERNEL:
        case MLX5_FLOW_NAMESPACE_LEFTOVERS:
+       case MLX5_FLOW_NAMESPACE_ANCHOR:
                prio = type;
                break;
        case MLX5_FLOW_NAMESPACE_FDB:
@@ -1351,6 +1518,25 @@ static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
        }
 }
 
+#define ANCHOR_PRIO 0
+#define ANCHOR_SIZE 1
+static int create_anchor_flow_table(struct mlx5_core_dev *dev)
+{
+       struct mlx5_flow_namespace *ns = NULL;
+       struct mlx5_flow_table *ft;
+
+       ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ANCHOR);
+       if (!ns)
+               return -EINVAL;
+       ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE);
+       if (IS_ERR(ft)) {
+               mlx5_core_err(dev, "Failed to create last anchor flow table\n");
+               return PTR_ERR(ft);
+       }
+       return 0;
+}
+
 static int init_root_ns(struct mlx5_core_dev *dev)
 {
 
@@ -1363,6 +1549,9 @@ static int init_root_ns(struct mlx5_core_dev *dev)
 
        set_prio_attrs(dev->priv.root_ns);
 
+       if (create_anchor_flow_table(dev))
+               goto cleanup;
+
        return 0;
 
 cleanup:
@@ -1392,6 +1581,15 @@ static void cleanup_single_prio_root_ns(struct mlx5_core_dev *dev,
        root_ns = NULL;
 }
 
+static void destroy_flow_tables(struct fs_prio *prio)
+{
+       struct mlx5_flow_table *iter;
+       struct mlx5_flow_table *tmp;
+
+       fs_for_each_ft_safe(iter, tmp, prio)
+               mlx5_destroy_flow_table(iter);
+}
+
 static void cleanup_root_ns(struct mlx5_core_dev *dev)
 {
        struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns;
@@ -1420,6 +1618,7 @@ static void cleanup_root_ns(struct mlx5_core_dev *dev)
                                                         list);
 
                                fs_get_obj(obj_iter_prio2, iter_prio2);
+                               destroy_flow_tables(obj_iter_prio2);
                                if (tree_remove_node(iter_prio2)) {
                                        mlx5_core_warn(dev,
                                                       "Priority %d wasn't destroyed, refcount > 1\n",
index 00245fd7e4bc8a561e6dc298d4d17396e3688420..f37a6248a27bedd7b6850ecb56c2de67a97c10da 100644 (file)
@@ -68,6 +68,11 @@ struct fs_node {
 struct mlx5_flow_rule {
        struct fs_node                          node;
        struct mlx5_flow_destination            dest_attr;
+       /* next_ft should be accessed under chain_lock and only if
+        * the rule's sw_action is FWD_NEXT_PRIO.
+        */
+       struct list_head                        next_ft;
+       u32                                     sw_action;
 };
 
 /* Type of children is mlx5_flow_group */
@@ -82,6 +87,10 @@ struct mlx5_flow_table {
                unsigned int            required_groups;
                unsigned int            num_groups;
        } autogroup;
+       /* Protect fwd_rules */
+       struct mutex                    lock;
+       /* FWD rules that point to this flow table */
+       struct list_head                fwd_rules;
 };
 
 /* Type of children is mlx5_flow_rule */
@@ -142,6 +151,9 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
 #define fs_list_for_each_entry(pos, root)              \
        list_for_each_entry(pos, root, node.list)
 
+#define fs_list_for_each_entry_safe(pos, tmp, root)            \
+       list_for_each_entry_safe(pos, tmp, root, node.list)
+
 #define fs_for_each_ns_or_ft_reverse(pos, prio)                                \
        list_for_each_entry_reverse(pos, &(prio)->node.children, list)
 
@@ -157,6 +169,9 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
 #define fs_for_each_ft(pos, prio)                      \
        fs_list_for_each_entry(pos, &(prio)->node.children)
 
+#define fs_for_each_ft_safe(pos, tmp, prio)                    \
+       fs_list_for_each_entry_safe(pos, tmp, &(prio)->node.children)
+
 #define fs_for_each_fg(pos, ft)                        \
        fs_list_for_each_entry(pos, &(ft)->node.children)
 
index aa1ab47023852dcf0b163e43e57629121ab71a3a..75c7ae6a5cc40f91ffc2a329c2bd763c8cfb79a2 100644 (file)
@@ -98,88 +98,55 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
 {
        int err;
 
-       err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR);
-       if (err)
-               return err;
-
-       err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX);
+       err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
        if (err)
                return err;
 
        if (MLX5_CAP_GEN(dev, eth_net_offloads)) {
-               err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS,
-                                        HCA_CAP_OPMOD_GET_CUR);
-               if (err)
-                       return err;
-               err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS,
-                                        HCA_CAP_OPMOD_GET_MAX);
+               err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS);
                if (err)
                        return err;
        }
 
        if (MLX5_CAP_GEN(dev, pg)) {
-               err = mlx5_core_get_caps(dev, MLX5_CAP_ODP,
-                                        HCA_CAP_OPMOD_GET_CUR);
-               if (err)
-                       return err;
-               err = mlx5_core_get_caps(dev, MLX5_CAP_ODP,
-                                        HCA_CAP_OPMOD_GET_MAX);
+               err = mlx5_core_get_caps(dev, MLX5_CAP_ODP);
                if (err)
                        return err;
        }
 
        if (MLX5_CAP_GEN(dev, atomic)) {
-               err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
-                                        HCA_CAP_OPMOD_GET_CUR);
-               if (err)
-                       return err;
-               err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
-                                        HCA_CAP_OPMOD_GET_MAX);
+               err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
                if (err)
                        return err;
        }
 
        if (MLX5_CAP_GEN(dev, roce)) {
-               err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE,
-                                        HCA_CAP_OPMOD_GET_CUR);
-               if (err)
-                       return err;
-               err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE,
-                                        HCA_CAP_OPMOD_GET_MAX);
+               err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE);
                if (err)
                        return err;
        }
 
        if (MLX5_CAP_GEN(dev, nic_flow_table)) {
-               err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE,
-                                        HCA_CAP_OPMOD_GET_CUR);
-               if (err)
-                       return err;
-               err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE,
-                                        HCA_CAP_OPMOD_GET_MAX);
+               err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE);
                if (err)
                        return err;
        }
 
        if (MLX5_CAP_GEN(dev, vport_group_manager) &&
            MLX5_CAP_GEN(dev, eswitch_flow_table)) {
-               err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE,
-                                        HCA_CAP_OPMOD_GET_CUR);
-               if (err)
-                       return err;
-               err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE,
-                                        HCA_CAP_OPMOD_GET_MAX);
+               err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE);
                if (err)
                        return err;
        }
 
        if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
-               err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH,
-                                        HCA_CAP_OPMOD_GET_CUR);
+               err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH);
                if (err)
                        return err;
-               err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH,
-                                        HCA_CAP_OPMOD_GET_MAX);
+       }
+
+       if (MLX5_CAP_GEN(dev, vector_calc)) {
+               err = mlx5_core_get_caps(dev, MLX5_CAP_VECTOR_CALC);
                if (err)
                        return err;
        }
index 1545a944c309bf3205ad49fc1de90bd21c3eaacd..f2354bc0ec19cbef3ce737d4ec810e30c6a4b3c7 100644 (file)
@@ -341,8 +341,9 @@ static u16 to_fw_pkey_sz(u32 size)
        }
 }
 
-int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
-                      enum mlx5_cap_mode cap_mode)
+static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
+                                  enum mlx5_cap_type cap_type,
+                                  enum mlx5_cap_mode cap_mode)
 {
        u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
        int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
@@ -392,6 +393,16 @@ query_ex:
        return err;
 }
 
+int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type)
+{
+       int ret;
+
+       ret = mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_CUR);
+       if (ret)
+               return ret;
+       return mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_MAX);
+}
+
 static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz, int opmod)
 {
        u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)];
@@ -419,8 +430,7 @@ static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
        int err;
 
        if (MLX5_CAP_GEN(dev, atomic)) {
-               err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
-                                        HCA_CAP_OPMOD_GET_CUR);
+               err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
                if (err)
                        return err;
        } else {
@@ -462,11 +472,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
        if (!set_ctx)
                goto query_ex;
 
-       err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX);
-       if (err)
-               goto query_ex;
-
-       err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR);
+       err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
        if (err)
                goto query_ex;
 
@@ -1117,7 +1123,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
        mlx5_init_cq_table(dev);
        mlx5_init_qp_table(dev);
        mlx5_init_srq_table(dev);
-       mlx5_init_mr_table(dev);
+       mlx5_init_mkey_table(dev);
 
        err = mlx5_init_fs(dev);
        if (err) {
@@ -1164,7 +1170,7 @@ err_sriov:
 err_reg_dev:
        mlx5_cleanup_fs(dev);
 err_fs:
-       mlx5_cleanup_mr_table(dev);
+       mlx5_cleanup_mkey_table(dev);
        mlx5_cleanup_srq_table(dev);
        mlx5_cleanup_qp_table(dev);
        mlx5_cleanup_cq_table(dev);
@@ -1237,7 +1243,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
 #endif
 
        mlx5_cleanup_fs(dev);
-       mlx5_cleanup_mr_table(dev);
+       mlx5_cleanup_mkey_table(dev);
        mlx5_cleanup_srq_table(dev);
        mlx5_cleanup_qp_table(dev);
        mlx5_cleanup_cq_table(dev);
index 6fa22b51e4607d4f235aeb9078dff67fa03ee4a9..77a7293921d59a3e4941f505bb1bfb39fec1f955 100644 (file)
 #include <linux/mlx5/cmd.h>
 #include "mlx5_core.h"
 
-void mlx5_init_mr_table(struct mlx5_core_dev *dev)
+void mlx5_init_mkey_table(struct mlx5_core_dev *dev)
 {
-       struct mlx5_mr_table *table = &dev->priv.mr_table;
+       struct mlx5_mkey_table *table = &dev->priv.mkey_table;
 
        memset(table, 0, sizeof(*table));
        rwlock_init(&table->lock);
        INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
 }
 
-void mlx5_cleanup_mr_table(struct mlx5_core_dev *dev)
+void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev)
 {
 }
 
-int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
+int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
+                         struct mlx5_core_mkey *mkey,
                          struct mlx5_create_mkey_mbox_in *in, int inlen,
                          mlx5_cmd_cbk_t callback, void *context,
                          struct mlx5_create_mkey_mbox_out *out)
 {
-       struct mlx5_mr_table *table = &dev->priv.mr_table;
+       struct mlx5_mkey_table *table = &dev->priv.mkey_table;
        struct mlx5_create_mkey_mbox_out lout;
        int err;
        u8 key;
@@ -83,34 +84,35 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
                return mlx5_cmd_status_to_err(&lout.hdr);
        }
 
-       mr->iova = be64_to_cpu(in->seg.start_addr);
-       mr->size = be64_to_cpu(in->seg.len);
-       mr->key = mlx5_idx_to_mkey(be32_to_cpu(lout.mkey) & 0xffffff) | key;
-       mr->pd = be32_to_cpu(in->seg.flags_pd) & 0xffffff;
+       mkey->iova = be64_to_cpu(in->seg.start_addr);
+       mkey->size = be64_to_cpu(in->seg.len);
+       mkey->key = mlx5_idx_to_mkey(be32_to_cpu(lout.mkey) & 0xffffff) | key;
+       mkey->pd = be32_to_cpu(in->seg.flags_pd) & 0xffffff;
 
        mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n",
-                     be32_to_cpu(lout.mkey), key, mr->key);
+                     be32_to_cpu(lout.mkey), key, mkey->key);
 
-       /* connect to MR tree */
+       /* connect to mkey tree */
        write_lock_irq(&table->lock);
-       err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->key), mr);
+       err = radix_tree_insert(&table->tree, mlx5_base_mkey(mkey->key), mkey);
        write_unlock_irq(&table->lock);
        if (err) {
-               mlx5_core_warn(dev, "failed radix tree insert of mr 0x%x, %d\n",
-                              mlx5_base_mkey(mr->key), err);
-               mlx5_core_destroy_mkey(dev, mr);
+               mlx5_core_warn(dev, "failed radix tree insert of mkey 0x%x, %d\n",
+                              mlx5_base_mkey(mkey->key), err);
+               mlx5_core_destroy_mkey(dev, mkey);
        }
 
        return err;
 }
 EXPORT_SYMBOL(mlx5_core_create_mkey);
 
-int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr)
+int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
+                          struct mlx5_core_mkey *mkey)
 {
-       struct mlx5_mr_table *table = &dev->priv.mr_table;
+       struct mlx5_mkey_table *table = &dev->priv.mkey_table;
        struct mlx5_destroy_mkey_mbox_in in;
        struct mlx5_destroy_mkey_mbox_out out;
-       struct mlx5_core_mr *deleted_mr;
+       struct mlx5_core_mkey *deleted_mkey;
        unsigned long flags;
        int err;
 
@@ -118,16 +120,16 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr)
        memset(&out, 0, sizeof(out));
 
        write_lock_irqsave(&table->lock, flags);
-       deleted_mr = radix_tree_delete(&table->tree, mlx5_base_mkey(mr->key));
+       deleted_mkey = radix_tree_delete(&table->tree, mlx5_base_mkey(mkey->key));
        write_unlock_irqrestore(&table->lock, flags);
-       if (!deleted_mr) {
-               mlx5_core_warn(dev, "failed radix tree delete of mr 0x%x\n",
-                              mlx5_base_mkey(mr->key));
+       if (!deleted_mkey) {
+               mlx5_core_warn(dev, "failed radix tree delete of mkey 0x%x\n",
+                              mlx5_base_mkey(mkey->key));
                return -ENOENT;
        }
 
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_MKEY);
-       in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mr->key));
+       in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mkey->key));
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
        if (err)
                return err;
@@ -139,7 +141,7 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr)
 }
 EXPORT_SYMBOL(mlx5_core_destroy_mkey);
 
-int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
+int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
                         struct mlx5_query_mkey_mbox_out *out, int outlen)
 {
        struct mlx5_query_mkey_mbox_in in;
@@ -149,7 +151,7 @@ int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
        memset(out, 0, outlen);
 
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_MKEY);
-       in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mr->key));
+       in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mkey->key));
        err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
        if (err)
                return err;
@@ -161,7 +163,7 @@ int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
 }
 EXPORT_SYMBOL(mlx5_core_query_mkey);
 
-int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
+int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey,
                             u32 *mkey)
 {
        struct mlx5_query_special_ctxs_mbox_in in;
index a87e773e93f3439dbd4cf93bc08a9a7563f4977b..5635ce7ad693e7306ee2f121e5f8d7aa78412528 100644 (file)
@@ -324,6 +324,29 @@ int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
 }
 EXPORT_SYMBOL_GPL(mlx5_query_port_vl_hw_cap);
 
+int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
+                            u8 port_num, void *out, size_t sz)
+{
+       u32 *in;
+       int err;
+
+       in  = mlx5_vzalloc(sz);
+       if (!in) {
+               err = -ENOMEM;
+               return err;
+       }
+
+       MLX5_SET(ppcnt_reg, in, local_port, port_num);
+
+       MLX5_SET(ppcnt_reg, in, grp, MLX5_INFINIBAND_PORT_COUNTERS_GROUP);
+       err = mlx5_core_access_reg(dev, in, sz, out,
+                                  sz, MLX5_REG_PPCNT, 0, 0);
+
+       kvfree(in);
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_query_ib_ppcnt);
+
 int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause)
 {
        u32 in[MLX5_ST_SZ_DW(pfcc_reg)];
index c7398b95aecdc0286480f85564e5a42660275bd6..bd518405859ed3974f7049196503353c7ee647fa 100644 (file)
@@ -850,3 +850,111 @@ int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
        return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED);
 }
 EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);
+
+int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
+                                 int vf, u8 port_num, void *out,
+                                 size_t out_sz)
+{
+       int     in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
+       int     is_group_manager;
+       void   *in;
+       int     err;
+
+       is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
+       in = mlx5_vzalloc(in_sz);
+       if (!in) {
+               err = -ENOMEM;
+               return err;
+       }
+
+       MLX5_SET(query_vport_counter_in, in, opcode,
+                MLX5_CMD_OP_QUERY_VPORT_COUNTER);
+       if (other_vport) {
+               if (is_group_manager) {
+                       MLX5_SET(query_vport_counter_in, in, other_vport, 1);
+                       MLX5_SET(query_vport_counter_in, in, vport_number, vf + 1);
+               } else {
+                       err = -EPERM;
+                       goto free;
+               }
+       }
+       if (MLX5_CAP_GEN(dev, num_ports) == 2)
+               MLX5_SET(query_vport_counter_in, in, port_num, port_num);
+
+       err = mlx5_cmd_exec(dev, in, in_sz, out,  out_sz);
+       if (err)
+               goto free;
+       err = mlx5_cmd_status_to_err_v2(out);
+
+free:
+       kvfree(in);
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter);
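/* Editor's illustration, not part of this patch: a hedged sketch of querying
 * the function's own counters on port 1; the output buffer is assumed to
 * follow the query_vport_counter_out layout.
 */
static int example_query_own_vport_counters(struct mlx5_core_dev *dev)
{
        int out_sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
        void *out = mlx5_vzalloc(out_sz);
        int err;

        if (!out)
                return -ENOMEM;
        err = mlx5_core_query_vport_counter(dev, 0, 0, 1, out, out_sz);
        kvfree(out);
        return err;
}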
+
+int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
+                                      u8 other_vport, u8 port_num,
+                                      int vf,
+                                      struct mlx5_hca_vport_context *req)
+{
+       int in_sz = MLX5_ST_SZ_BYTES(modify_hca_vport_context_in);
+       u8 out[MLX5_ST_SZ_BYTES(modify_hca_vport_context_out)];
+       int is_group_manager;
+       void *in;
+       int err;
+       void *ctx;
+
+       mlx5_core_dbg(dev, "vf %d\n", vf);
+       is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
+       in = kzalloc(in_sz, GFP_KERNEL);
+       if (!in)
+               return -ENOMEM;
+
+       memset(out, 0, sizeof(out));
+       MLX5_SET(modify_hca_vport_context_in, in, opcode, MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT);
+       if (other_vport) {
+               if (is_group_manager) {
+                       MLX5_SET(modify_hca_vport_context_in, in, other_vport, 1);
+                       MLX5_SET(modify_hca_vport_context_in, in, vport_number, vf);
+               } else {
+                       err = -EPERM;
+                       goto ex;
+               }
+       }
+
+       if (MLX5_CAP_GEN(dev, num_ports) > 1)
+               MLX5_SET(modify_hca_vport_context_in, in, port_num, port_num);
+
+       ctx = MLX5_ADDR_OF(modify_hca_vport_context_in, in, hca_vport_context);
+       MLX5_SET(hca_vport_context, ctx, field_select, req->field_select);
+       MLX5_SET(hca_vport_context, ctx, sm_virt_aware, req->sm_virt_aware);
+       MLX5_SET(hca_vport_context, ctx, has_smi, req->has_smi);
+       MLX5_SET(hca_vport_context, ctx, has_raw, req->has_raw);
+       MLX5_SET(hca_vport_context, ctx, vport_state_policy, req->policy);
+       MLX5_SET(hca_vport_context, ctx, port_physical_state, req->phys_state);
+       MLX5_SET(hca_vport_context, ctx, vport_state, req->vport_state);
+       MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid);
+       MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid);
+       MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1);
+       MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select, req->cap_mask1_perm);
+       MLX5_SET(hca_vport_context, ctx, cap_mask2, req->cap_mask2);
+       MLX5_SET(hca_vport_context, ctx, cap_mask2_field_select, req->cap_mask2_perm);
+       MLX5_SET(hca_vport_context, ctx, lid, req->lid);
+       MLX5_SET(hca_vport_context, ctx, init_type_reply, req->init_type_reply);
+       MLX5_SET(hca_vport_context, ctx, lmc, req->lmc);
+       MLX5_SET(hca_vport_context, ctx, subnet_timeout, req->subnet_timeout);
+       MLX5_SET(hca_vport_context, ctx, sm_lid, req->sm_lid);
+       MLX5_SET(hca_vport_context, ctx, sm_sl, req->sm_sl);
+       MLX5_SET(hca_vport_context, ctx, qkey_violation_counter, req->qkey_violation_counter);
+       MLX5_SET(hca_vport_context, ctx, pkey_violation_counter, req->pkey_violation_counter);
+       err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
+       if (err)
+               goto ex;
+
+       err = mlx5_cmd_status_to_err_v2(out);
+
+ex:
+       kfree(in);
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_modify_hca_vport_context);
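/* Editor's illustration, not part of this patch: a hedged sketch of a vport
 * group manager forcing a VF's link policy with the new helper. That the
 * MLX5_HCA_VPORT_SEL_STATE_POLICY bit belongs in field_select is an
 * assumption based on the definitions added elsewhere in this patch.
 */
static int example_force_vf_policy_up(struct mlx5_core_dev *dev, int vf)
{
        struct mlx5_hca_vport_context req = {};

        req.field_select = MLX5_HCA_VPORT_SEL_STATE_POLICY;
        req.policy = MLX5_POLICY_UP;
        return mlx5_core_modify_hca_vport_context(dev, 1, 1, vf, &req);
}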
index a0e8cc8dcc67dcc88e7d32ab47a8ef22af52c946..8541a913f6a36effd78ef24bb0bc3cd69ce20541 100644 (file)
@@ -219,6 +219,7 @@ enum {
        MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB = 1ULL << 31,
        MLX4_DEV_CAP_FLAG2_LB_SRC_CHK           = 1ULL << 32,
        MLX4_DEV_CAP_FLAG2_ROCE_V1_V2           = 1ULL <<  33,
+       MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER   = 1ULL <<  34,
 };
 
 enum {
@@ -1160,6 +1161,8 @@ enum mlx4_net_trans_promisc_mode {
        MLX4_FS_REGULAR = 1,
        MLX4_FS_ALL_DEFAULT,
        MLX4_FS_MC_DEFAULT,
+       MLX4_FS_MIRROR_RX_PORT,
+       MLX4_FS_MIRROR_SX_PORT,
        MLX4_FS_UC_SNIFFER,
        MLX4_FS_MC_SNIFFER,
        MLX4_FS_MODE_NUM, /* should be last */
index 987764afa65c2344d5a85328f9f35a419e21db64..4b531c44b3c7767660375515f093242394a37e84 100644 (file)
@@ -105,6 +105,29 @@ __mlx5_mask(typ, fld))
        ___t; \
 })
 
+/* Big endian getters */
+#define MLX5_GET64_BE(typ, p, fld) (*((__be64 *)(p) +\
+       __mlx5_64_off(typ, fld)))
+
+#define MLX5_GET_BE(type_t, typ, p, fld) ({                              \
+               type_t tmp;                                               \
+               switch (sizeof(tmp)) {                                    \
+               case sizeof(u8):                                          \
+                       tmp = (__force type_t)MLX5_GET(typ, p, fld);      \
+                       break;                                            \
+               case sizeof(u16):                                         \
+                       tmp = (__force type_t)cpu_to_be16(MLX5_GET(typ, p, fld)); \
+                       break;                                            \
+               case sizeof(u32):                                         \
+                       tmp = (__force type_t)cpu_to_be32(MLX5_GET(typ, p, fld)); \
+                       break;                                            \
+               case sizeof(u64):                                         \
+                       tmp = (__force type_t)MLX5_GET64_BE(typ, p, fld); \
+                       break;                                            \
+                       }                                                 \
+               tmp;                                                      \
+               })
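/* Editor's illustration, not part of this patch: the new getter hands back the
 * field already in big-endian form, e.g. reading the 16-bit LID out of an
 * hca_vport_context mailbox region "ctx" without a byte swap at the caller.
 */
static __be16 example_get_lid_be(void *ctx)
{
        return MLX5_GET_BE(__be16, hca_vport_context, ctx, lid);
}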
+
 enum {
        MLX5_MAX_COMMANDS               = 32,
        MLX5_CMD_DATA_BLOCK_SIZE        = 512,
@@ -1196,6 +1219,8 @@ enum mlx5_cap_type {
        MLX5_CAP_FLOW_TABLE,
        MLX5_CAP_ESWITCH_FLOW_TABLE,
        MLX5_CAP_ESWITCH,
+       MLX5_CAP_RESERVED,
+       MLX5_CAP_VECTOR_CALC,
        /* NUM OF CAP Types */
        MLX5_CAP_NUM
 };
@@ -1258,6 +1283,10 @@ enum mlx5_cap_type {
 #define MLX5_CAP_ODP(mdev, cap)\
        MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap)
 
+#define MLX5_CAP_VECTOR_CALC(mdev, cap) \
+       MLX5_GET(vector_calc_cap, \
+                mdev->hca_caps_cur[MLX5_CAP_VECTOR_CALC], cap)
+
 enum {
        MLX5_CMD_STAT_OK                        = 0x0,
        MLX5_CMD_STAT_INT_ERR                   = 0x1,
@@ -1284,7 +1313,8 @@ enum {
        MLX5_RFC_3635_COUNTERS_GROUP          = 0x3,
        MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5,
        MLX5_PER_PRIORITY_COUNTERS_GROUP      = 0x10,
-       MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11
+       MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11,
+       MLX5_INFINIBAND_PORT_COUNTERS_GROUP   = 0x20,
 };
 
 static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
@@ -1294,6 +1324,11 @@ static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
        return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
 }
 
-#define MLX5_BY_PASS_NUM_PRIOS 9
+#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 8
+#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 8
+#define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1
+#define MLX5_BY_PASS_NUM_PRIOS (MLX5_BY_PASS_NUM_REGULAR_PRIOS +\
+                               MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS +\
+                               MLX5_BY_PASS_NUM_MULTICAST_PRIOS)
 
 #endif /* MLX5_DEVICE_H */
index 1e3006dcf35d735175ebe054d8867f89b994231c..e1d987fb49b2789e618a0dd94c413c296d4e45d0 100644 (file)
@@ -338,7 +338,7 @@ struct mlx5_core_sig_ctx {
        u32                     sigerr_count;
 };
 
-struct mlx5_core_mr {
+struct mlx5_core_mkey {
        u64                     iova;
        u64                     size;
        u32                     key;
@@ -426,7 +426,7 @@ struct mlx5_srq_table {
        struct radix_tree_root  tree;
 };
 
-struct mlx5_mr_table {
+struct mlx5_mkey_table {
        /* protect radix tree
         */
        rwlock_t                lock;
@@ -484,9 +484,9 @@ struct mlx5_priv {
        struct mlx5_cq_table    cq_table;
        /* end: cq staff */
 
-       /* start: mr staff */
-       struct mlx5_mr_table    mr_table;
-       /* end: mr staff */
+       /* start: mkey stuff */
+       struct mlx5_mkey_table  mkey_table;
+       /* end: mkey stuff */
 
        /* start: alloc staff */
        /* protect buffer alocation according to numa node */
@@ -613,7 +613,10 @@ struct mlx5_pas {
 };
 
 enum port_state_policy {
-       MLX5_AAA_000
+       MLX5_POLICY_DOWN        = 0,
+       MLX5_POLICY_UP          = 1,
+       MLX5_POLICY_FOLLOW      = 2,
+       MLX5_POLICY_INVALID     = 0xffffffff
 };
 
 enum phy_port_state {
@@ -706,8 +709,7 @@ void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
 void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
 int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr);
 int mlx5_cmd_status_to_err_v2(void *ptr);
-int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
-                      enum mlx5_cap_mode cap_mode);
+int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
 int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
                  int out_size);
 int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
@@ -739,16 +741,18 @@ int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                        struct mlx5_query_srq_mbox_out *out);
 int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                      u16 lwm, int is_srq);
-void mlx5_init_mr_table(struct mlx5_core_dev *dev);
-void mlx5_cleanup_mr_table(struct mlx5_core_dev *dev);
-int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
+void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
+void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev);
+int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
+                         struct mlx5_core_mkey *mkey,
                          struct mlx5_create_mkey_mbox_in *in, int inlen,
                          mlx5_cmd_cbk_t callback, void *context,
                          struct mlx5_create_mkey_mbox_out *out);
-int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr);
-int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
+int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
+                          struct mlx5_core_mkey *mkey);
+int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
                         struct mlx5_query_mkey_mbox_out *out, int outlen);
-int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
+int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey,
                             u32 *mkey);
 int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
 int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
@@ -847,6 +851,8 @@ int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
 void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
 int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
                        struct mlx5_odp_caps *odp_caps);
+int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
+                            u8 port_num, void *out, size_t sz);
 
 static inline int fw_initializing(struct mlx5_core_dev *dev)
 {
index 8230caa3fb6edef576f7455b2df16bd6713171f3..8dec5508d93d355e3101935106bcfb582e795193 100644 (file)
 
 #define MLX5_FS_DEFAULT_FLOW_TAG 0x0
 
+enum {
+       MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO  = 1 << 16,
+};
+
 #define LEFTOVERS_RULE_NUM      2
 static inline void build_leftovers_ft_param(int *priority,
                                            int *n_ent,
@@ -52,6 +56,7 @@ enum mlx5_flow_namespace_type {
        MLX5_FLOW_NAMESPACE_BYPASS,
        MLX5_FLOW_NAMESPACE_KERNEL,
        MLX5_FLOW_NAMESPACE_LEFTOVERS,
+       MLX5_FLOW_NAMESPACE_ANCHOR,
        MLX5_FLOW_NAMESPACE_FDB,
 };
 
index 51f1e540fc2b83bf9dd143637bad8acdbc44d0fd..bb9e07ca65345ac0b739b5a2b87c5f6c711c21a1 100644 (file)
@@ -458,7 +458,8 @@ struct mlx5_ifc_ads_bits {
 };
 
 struct mlx5_ifc_flow_table_nic_cap_bits {
-       u8         reserved_at_0[0x200];
+       u8         nic_rx_multi_path_tirs[0x1];
+       u8         reserved_at_1[0x1ff];
 
        struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive;
 
@@ -615,6 +616,33 @@ struct mlx5_ifc_odp_cap_bits {
        u8         reserved_at_e0[0x720];
 };
 
+struct mlx5_ifc_calc_op {
+       u8        reserved_at_0[0x10];
+       u8        reserved_at_10[0x9];
+       u8        op_swap_endianness[0x1];
+       u8        op_min[0x1];
+       u8        op_xor[0x1];
+       u8        op_or[0x1];
+       u8        op_and[0x1];
+       u8        op_max[0x1];
+       u8        op_add[0x1];
+};
+
+struct mlx5_ifc_vector_calc_cap_bits {
+       u8         calc_matrix[0x1];
+       u8         reserved_at_1[0x1f];
+       u8         reserved_at_20[0x8];
+       u8         max_vec_count[0x8];
+       u8         reserved_at_30[0xd];
+       u8         max_chunk_size[0x3];
+       struct mlx5_ifc_calc_op calc0;
+       struct mlx5_ifc_calc_op calc1;
+       struct mlx5_ifc_calc_op calc2;
+       struct mlx5_ifc_calc_op calc3;
+
+       u8         reserved_at_e0[0x720];
+};
+
 enum {
        MLX5_WQ_TYPE_LINKED_LIST  = 0x0,
        MLX5_WQ_TYPE_CYCLIC       = 0x1,
@@ -736,7 +764,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
        u8         cqe_version[0x4];
 
        u8         compact_address_vector[0x1];
-       u8         reserved_at_200[0xe];
+       u8         reserved_at_200[0x3];
+       u8         ipoib_basic_offloads[0x1];
+       u8         reserved_at_204[0xa];
        u8         drain_sigerr[0x1];
        u8         cmdif_checksum[0x2];
        u8         sigerr_cqe[0x1];
@@ -767,10 +797,14 @@ struct mlx5_ifc_cmd_hca_cap_bits {
        u8         cd[0x1];
        u8         reserved_at_22c[0x1];
        u8         apm[0x1];
-       u8         reserved_at_22e[0x7];
+       u8         vector_calc[0x1];
+       u8         reserved_at_22f[0x1];
+       u8         imaicl[0x1];
+       u8         reserved_at_231[0x4];
        u8         qkv[0x1];
        u8         pkv[0x1];
-       u8         reserved_at_237[0x4];
+       u8         set_deth_sqpn[0x1];
+       u8         reserved_at_239[0x3];
        u8         xrc[0x1];
        u8         ud[0x1];
        u8         uc[0x1];
@@ -1208,6 +1242,36 @@ struct mlx5_ifc_phys_layer_cntrs_bits {
        u8         reserved_at_640[0x180];
 };
 
+struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits {
+       u8         symbol_error_counter[0x10];
+
+       u8         link_error_recovery_counter[0x8];
+
+       u8         link_downed_counter[0x8];
+
+       u8         port_rcv_errors[0x10];
+
+       u8         port_rcv_remote_physical_errors[0x10];
+
+       u8         port_rcv_switch_relay_errors[0x10];
+
+       u8         port_xmit_discards[0x10];
+
+       u8         port_xmit_constraint_errors[0x8];
+
+       u8         port_rcv_constraint_errors[0x8];
+
+       u8         reserved_at_70[0x8];
+
+       u8         link_overrun_errors[0x8];
+
+       u8         reserved_at_80[0x10];
+
+       u8         vl_15_dropped[0x10];
+
+       u8         reserved_at_a0[0xa0];
+};
+
 struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits {
        u8         transmit_queue_high[0x20];
 
@@ -1780,7 +1844,7 @@ struct mlx5_ifc_qpc_bits {
        u8         log_sq_size[0x4];
        u8         reserved_at_55[0x6];
        u8         rlky[0x1];
-       u8         reserved_at_5c[0x4];
+       u8         ulp_stateless_offload_mode[0x4];
 
        u8         counter_set_id[0x8];
        u8         uar_page[0x18];
@@ -1904,6 +1968,7 @@ union mlx5_ifc_hca_cap_union_bits {
        struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap;
        struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap;
        struct mlx5_ifc_e_switch_cap_bits e_switch_cap;
+       struct mlx5_ifc_vector_calc_cap_bits vector_calc_cap;
        u8         reserved_at_0[0x8000];
 };
 
@@ -2618,6 +2683,7 @@ union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
        struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout;
        struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout;
        struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout;
+       struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits ib_port_cntrs_grp_data_layout;
        struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
        u8         reserved_at_0[0x7c0];
 };
@@ -3126,7 +3192,8 @@ struct mlx5_ifc_query_vport_counter_in_bits {
        u8         op_mod[0x10];
 
        u8         other_vport[0x1];
-       u8         reserved_at_41[0xf];
+       u8         reserved_at_41[0xb];
+       u8         port_num[0x4];
        u8         vport_number[0x10];
 
        u8         reserved_at_60[0x60];
@@ -3629,6 +3696,12 @@ struct mlx5_ifc_query_hca_vport_pkey_in_bits {
        u8         pkey_index[0x10];
 };
 
+enum {
+       MLX5_HCA_VPORT_SEL_PORT_GUID    = 1 << 0,
+       MLX5_HCA_VPORT_SEL_NODE_GUID    = 1 << 1,
+       MLX5_HCA_VPORT_SEL_STATE_POLICY = 1 << 2,
+};
+
 struct mlx5_ifc_query_hca_vport_gid_out_bits {
        u8         status[0x8];
        u8         reserved_at_8[0x18];
@@ -6954,6 +7027,7 @@ union mlx5_ifc_ports_control_registers_document_bits {
        struct mlx5_ifc_peir_reg_bits peir_reg;
        struct mlx5_ifc_pelc_reg_bits pelc_reg;
        struct mlx5_ifc_pfcc_reg_bits pfcc_reg;
+       struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits ib_port_cntrs_grp_data_layout;
        struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
        struct mlx5_ifc_pifr_reg_bits pifr_reg;
        struct mlx5_ifc_pipg_reg_bits pipg_reg;
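
The new capability bits laid out above (vector_calc, ipoib_basic_offloads, imaicl, set_deth_sqpn, and friends) are read through the generated MLX5_CAP_* accessors rather than by dereferencing the ifc structures directly. A minimal sketch, assuming a valid struct mlx5_core_dev from the driver core (the helper below is illustrative, not part of the patch):

#include <linux/mlx5/device.h>
#include <linux/mlx5/driver.h>

/* Illustrative only: report which of the newly exposed HCA capabilities
 * this device advertises.  MLX5_CAP_GEN() reads the cmd_hca_cap bits
 * whose layout is extended in the hunks above.
 */
static void report_new_caps(struct mlx5_core_dev *mdev)
{
	if (MLX5_CAP_GEN(mdev, vector_calc))
		pr_info("vector calc offload supported\n");
	if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads))
		pr_info("IPoIB basic offloads supported\n");
	if (MLX5_CAP_GEN(mdev, set_deth_sqpn))
		pr_info("DETH source QPN override supported\n");
}
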
index 5b8c89ffaa5830fdf05fbc40a1fce0b5d2ffa7f3..cf031a3f16c583047d6c59389e7d57ce198356d1 100644 (file)
@@ -499,7 +499,8 @@ struct mlx5_qp_context {
        u8                      reserved2[4];
        __be32                  next_send_psn;
        __be32                  cqn_send;
-       u8                      reserved3[8];
+       __be32                  deth_sqpn;
+       u8                      reserved3[4];
        __be32                  last_acked_psn;
        __be32                  ssn;
        __be32                  params2;
@@ -621,9 +622,9 @@ static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u
        return radix_tree_lookup(&dev->priv.qp_table.tree, qpn);
 }
 
-static inline struct mlx5_core_mr *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u32 key)
+static inline struct mlx5_core_mkey *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u32 key)
 {
-       return radix_tree_lookup(&dev->priv.mr_table.tree, key);
+       return radix_tree_lookup(&dev->priv.mkey_table.tree, key);
 }
 
 struct mlx5_page_fault_resume_mbox_in {
index 123771003e68586571690f9a5211e5afaa0143e3..bd93e63236036b7086206714ec3e61cb052358c6 100644 (file)
@@ -92,5 +92,12 @@ int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
 
 int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev);
 int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev);
+int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
+                                 int vf, u8 port_num, void *out,
+                                 size_t out_sz);
+int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
+                                      u8 other_vport, u8 port_num,
+                                      int vf,
+                                      struct mlx5_hca_vport_context *req);
 
 #endif /* __MLX5_VPORT_H__ */
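
The two exported vport helpers above are intended to be driven from the mlx5 IB side. A hedged sketch of pulling the raw counter block for a VF port, assuming MLX5_ST_SZ_BYTES() from mlx5_ifc.h and a valid core device (the function name and error handling are illustrative):

#include <linux/slab.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>

/* Illustrative only: fetch the query_vport_counter output for VF 'vf'
 * behind port 'port'.  Individual counters would then be extracted from
 * 'out' with the MLX5_GET()/MLX5_GET64() accessors.
 */
static int query_vf_counters(struct mlx5_core_dev *mdev, int vf, u8 port)
{
	int sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	void *out;
	int err;

	out = kzalloc(sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	/* other_vport = 1: query the VF's vport rather than our own */
	err = mlx5_core_query_vport_counter(mdev, 1, vf, port, out, sz);

	kfree(out);
	return err;
}
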
index 5440b7b705eb1756f4d095e6eab3518893245568..7b4ae218b90bcfe6eeef660fe34972a3bddc3800 100644 (file)
@@ -1147,6 +1147,9 @@ struct net_device_ops {
                                                   struct nlattr *port[]);
        int                     (*ndo_get_vf_port)(struct net_device *dev,
                                                   int vf, struct sk_buff *skb);
+       int                     (*ndo_set_vf_guid)(struct net_device *dev,
+                                                  int vf, u64 guid,
+                                                  int guid_type);
        int                     (*ndo_set_vf_rss_query_en)(
                                                   struct net_device *dev,
                                                   int vf, bool setting);
index 0ff049bd9ad413c47b92aaaf116a43d1738e4b32..37dd534cbeab89595280cb2aa0545a1702fd33fd 100644 (file)
@@ -424,11 +424,11 @@ typedef void (*ib_mad_send_handler)(struct ib_mad_agent *mad_agent,
 /**
  * ib_mad_snoop_handler - Callback handler for snooping sent MADs.
  * @mad_agent: MAD agent that snooped the MAD.
- * @send_wr: Work request information on the sent MAD.
+ * @send_buf: send MAD data buffer.
  * @mad_send_wc: Work completion information on the sent MAD.  Valid
  *   only for snooping that occurs on a send completion.
  *
- * Clients snooping MADs should not modify data referenced by the @send_wr
+ * Clients snooping MADs should not modify data referenced by the @send_buf
  * or @mad_send_wc.
  */
 typedef void (*ib_mad_snoop_handler)(struct ib_mad_agent *mad_agent,
index d7d531cf00b76575d0654a62ccedd41b657e00bb..fb2cef4e97471bbeb67dc950f88736dd34a21b3c 100644 (file)
@@ -56,6 +56,7 @@
 #include <linux/string.h>
 #include <linux/slab.h>
 
+#include <linux/if_link.h>
 #include <linux/atomic.h>
 #include <linux/mmu_notifier.h>
 #include <asm/uaccess.h>
@@ -97,6 +98,11 @@ enum rdma_node_type {
        RDMA_NODE_USNIC_UDP,
 };
 
+enum {
+       /* set the locally administered indication */
+       IB_SA_WELL_KNOWN_GUID   = BIT_ULL(57) | 2,
+};
+
 enum rdma_transport_type {
        RDMA_TRANSPORT_IB,
        RDMA_TRANSPORT_IWARP,
@@ -212,6 +218,8 @@ enum ib_device_cap_flags {
        IB_DEVICE_MANAGED_FLOW_STEERING         = (1 << 29),
        IB_DEVICE_SIGNATURE_HANDOVER            = (1 << 30),
        IB_DEVICE_ON_DEMAND_PAGING              = (1 << 31),
+       IB_DEVICE_SG_GAPS_REG                   = (1ULL << 32),
+       IB_DEVICE_VIRTUAL_FUNCTION              = ((u64)1 << 33),
 };
 
 enum ib_signature_prot_cap {
@@ -273,7 +281,7 @@ struct ib_device_attr {
        u32                     hw_ver;
        int                     max_qp;
        int                     max_qp_wr;
-       int                     device_cap_flags;
+       u64                     device_cap_flags;
        int                     max_sge;
        int                     max_sge_rd;
        int                     max_cq;
@@ -489,6 +497,7 @@ union rdma_protocol_stats {
                                        | RDMA_CORE_CAP_OPA_MAD)
 
 struct ib_port_attr {
+       u64                     subnet_prefix;
        enum ib_port_state      state;
        enum ib_mtu             max_mtu;
        enum ib_mtu             active_mtu;
@@ -508,6 +517,7 @@ struct ib_port_attr {
        u8                      active_width;
        u8                      active_speed;
        u8                      phys_state;
+       bool                    grh_required;
 };
 
 enum ib_device_modify_flags {
@@ -663,10 +673,15 @@ __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate);
  * @IB_MR_TYPE_SIGNATURE:     memory region that is used for
  *                            signature operations (data-integrity
  *                            capable regions)
+ * @IB_MR_TYPE_SG_GAPS:       memory region that is capable of
+ *                            registering arbitrary sg lists (without
+ *                            the normal mr constraints - see
+ *                            ib_map_mr_sg)
  */
 enum ib_mr_type {
        IB_MR_TYPE_MEM_REG,
        IB_MR_TYPE_SIGNATURE,
+       IB_MR_TYPE_SG_GAPS,
 };
 
 /**
@@ -1488,6 +1503,11 @@ enum ib_flow_domain {
        IB_FLOW_DOMAIN_NUM /* Must be last */
 };
 
+enum ib_flow_flags {
+       IB_FLOW_ATTR_FLAGS_DONT_TRAP = 1UL << 1, /* Continue match, no steal */
+       IB_FLOW_ATTR_FLAGS_RESERVED  = 1UL << 2  /* Must be last */
+};
+
 struct ib_flow_eth_filter {
        u8      dst_mac[6];
        u8      src_mac[6];
@@ -1809,7 +1829,8 @@ struct ib_device {
                                                struct scatterlist *sg,
                                                int sg_nents);
        struct ib_mw *             (*alloc_mw)(struct ib_pd *pd,
-                                              enum ib_mw_type type);
+                                              enum ib_mw_type type,
+                                              struct ib_udata *udata);
        int                        (*dealloc_mw)(struct ib_mw *mw);
        struct ib_fmr *            (*alloc_fmr)(struct ib_pd *pd,
                                                int mr_access_flags,
@@ -1847,6 +1868,16 @@ struct ib_device {
        int                        (*check_mr_status)(struct ib_mr *mr, u32 check_mask,
                                                      struct ib_mr_status *mr_status);
        void                       (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
+       void                       (*drain_rq)(struct ib_qp *qp);
+       void                       (*drain_sq)(struct ib_qp *qp);
+       int                        (*set_vf_link_state)(struct ib_device *device, int vf, u8 port,
+                                                       int state);
+       int                        (*get_vf_config)(struct ib_device *device, int vf, u8 port,
+                                                  struct ifla_vf_info *ivf);
+       int                        (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
+                                                  struct ifla_vf_stats *stats);
+       int                        (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
+                                                 int type);
 
        struct ib_dma_mapping_ops   *dma_ops;
 
@@ -2290,6 +2321,15 @@ int ib_query_gid(struct ib_device *device,
                 u8 port_num, int index, union ib_gid *gid,
                 struct ib_gid_attr *attr);
 
+int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
+                        int state);
+int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
+                    struct ifla_vf_info *info);
+int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
+                   struct ifla_vf_stats *stats);
+int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
+                  int type);
+
 int ib_query_pkey(struct ib_device *device,
                  u8 port_num, u16 index, u16 *pkey);
 
@@ -3095,4 +3135,7 @@ int ib_sg_to_pages(struct ib_mr *mr,
                   int sg_nents,
                   int (*set_page)(struct ib_mr *, u64));
 
+void ib_drain_rq(struct ib_qp *qp);
+void ib_drain_sq(struct ib_qp *qp);
+void ib_drain_qp(struct ib_qp *qp);
 #endif /* IB_VERBS_H */
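
The ib_drain_* helpers declared above give ULPs a portable way to flush outstanding work requests before destroying a QP (drivers may override them through the new drain_rq/drain_sq methods). A minimal teardown sketch, assuming a fully connected QP (the wrapper name is hypothetical):

#include <rdma/ib_verbs.h>

/* Illustrative only: quiesce both work queues so every posted WR is
 * completed (typically flushed with an error status) before the QP
 * itself is destroyed.
 */
static void ulp_destroy_qp(struct ib_qp *qp)
{
	ib_drain_qp(qp);	/* waits for SQ and RQ completions */
	ib_destroy_qp(qp);
}
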
index 036bd277266254dba1ff5807a760e17da013667d..6d0065c322b706aaad7ac34703834f12e42ea39d 100644 (file)
@@ -83,8 +83,10 @@ struct iw_cm_id {
        iw_cm_handler           cm_handler;      /* client callback function */
        void                    *context;        /* client cb context */
        struct ib_device        *device;
-       struct sockaddr_storage local_addr;
+       struct sockaddr_storage local_addr;      /* local addr */
        struct sockaddr_storage remote_addr;
+       struct sockaddr_storage m_local_addr;    /* mapped local addr */
+       struct sockaddr_storage m_remote_addr;   /* mapped remote addr */
        void                    *provider_data;  /* provider private data */
        iw_event_handler        event_handler;   /* cb for provider
                                                    events */
@@ -92,6 +94,7 @@ struct iw_cm_id {
        void (*add_ref)(struct iw_cm_id *);
        void (*rem_ref)(struct iw_cm_id *);
        u8  tos;
+       bool mapped;
 };
 
 struct iw_cm_conn_param {
@@ -123,6 +126,7 @@ struct iw_cm_verbs {
                                         int backlog);
 
        int             (*destroy_listen)(struct iw_cm_id *cm_id);
+       char            ifname[IFNAMSIZ];
 };
 
 /**
index a30b78090594d500df10aa91f9f1b6628ae88593..1d01e8a4e5dd3ab4d052f234a541c7d7bc778c02 100644 (file)
@@ -556,6 +556,8 @@ enum {
                                 */
        IFLA_VF_STATS,          /* network device statistics */
        IFLA_VF_TRUST,          /* Trust VF */
+       IFLA_VF_IB_NODE_GUID,   /* VF Infiniband node GUID */
+       IFLA_VF_IB_PORT_GUID,   /* VF Infiniband port GUID */
        __IFLA_VF_MAX,
 };
 
@@ -588,6 +590,11 @@ struct ifla_vf_spoofchk {
        __u32 setting;
 };
 
+struct ifla_vf_guid {
+       __u32 vf;
+       __u64 guid;
+};
+
 enum {
        IFLA_VF_LINK_STATE_AUTO,        /* link state of the uplink */
        IFLA_VF_LINK_STATE_ENABLE,      /* link always up */
index c19a5dc1531af5df6f58ec8fadb41fe51c7afc3d..6e373d151cad750ee637fea41800652d02548681 100644 (file)
@@ -5,9 +5,10 @@
 
 enum {
        RDMA_NL_RDMA_CM = 1,
-       RDMA_NL_NES,
-       RDMA_NL_C4IW,
+       RDMA_NL_IWCM,
+       RDMA_NL_RSVD,
        RDMA_NL_LS,     /* RDMA Local Services */
+       RDMA_NL_I40IW,
        RDMA_NL_NUM_CLIENTS
 };
 
index 52b4a2f993f2c91e8fdeb82e6d826467f34ec03c..1852e383afd6263967c8c9e85fcdc4c9b6529193 100644 (file)
@@ -109,14 +109,13 @@ struct p9_trans_rdma {
 /**
  * p9_rdma_context - Keeps track of in-process WR
  *
- * @wc_op: The original WR op for when the CQE completes in error.
  * @busa: Bus address to unmap when the WR completes
  * @req: Keeps track of requests (send)
 * @rc: Keeps track of replies (receive)
  */
 struct p9_rdma_req;
 struct p9_rdma_context {
-       enum ib_wc_opcode wc_op;
+       struct ib_cqe cqe;
        dma_addr_t busa;
        union {
                struct p9_req_t *req;
@@ -284,9 +283,12 @@ p9_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
 }
 
 static void
-handle_recv(struct p9_client *client, struct p9_trans_rdma *rdma,
-           struct p9_rdma_context *c, enum ib_wc_status status, u32 byte_len)
+recv_done(struct ib_cq *cq, struct ib_wc *wc)
 {
+       struct p9_client *client = cq->cq_context;
+       struct p9_trans_rdma *rdma = client->trans;
+       struct p9_rdma_context *c =
+               container_of(wc->wr_cqe, struct p9_rdma_context, cqe);
        struct p9_req_t *req;
        int err = 0;
        int16_t tag;
@@ -295,7 +297,7 @@ handle_recv(struct p9_client *client, struct p9_trans_rdma *rdma,
        ib_dma_unmap_single(rdma->cm_id->device, c->busa, client->msize,
                                                         DMA_FROM_DEVICE);
 
-       if (status != IB_WC_SUCCESS)
+       if (wc->status != IB_WC_SUCCESS)
                goto err_out;
 
        err = p9_parse_header(c->rc, NULL, NULL, &tag, 1);
@@ -316,21 +318,32 @@ handle_recv(struct p9_client *client, struct p9_trans_rdma *rdma,
        req->rc = c->rc;
        p9_client_cb(client, req, REQ_STATUS_RCVD);
 
+ out:
+       up(&rdma->rq_sem);
+       kfree(c);
        return;
 
  err_out:
-       p9_debug(P9_DEBUG_ERROR, "req %p err %d status %d\n", req, err, status);
+       p9_debug(P9_DEBUG_ERROR, "req %p err %d status %d\n",
+                       req, err, wc->status);
        rdma->state = P9_RDMA_FLUSHING;
        client->status = Disconnected;
+       goto out;
 }
 
 static void
-handle_send(struct p9_client *client, struct p9_trans_rdma *rdma,
-           struct p9_rdma_context *c, enum ib_wc_status status, u32 byte_len)
+send_done(struct ib_cq *cq, struct ib_wc *wc)
 {
+       struct p9_client *client = cq->cq_context;
+       struct p9_trans_rdma *rdma = client->trans;
+       struct p9_rdma_context *c =
+               container_of(wc->wr_cqe, struct p9_rdma_context, cqe);
+
        ib_dma_unmap_single(rdma->cm_id->device,
                            c->busa, c->req->tc->size,
                            DMA_TO_DEVICE);
+       up(&rdma->sq_sem);
+       kfree(c);
 }
 
 static void qp_event_handler(struct ib_event *event, void *context)
@@ -339,42 +352,6 @@ static void qp_event_handler(struct ib_event *event, void *context)
                 event->event, context);
 }
 
-static void cq_comp_handler(struct ib_cq *cq, void *cq_context)
-{
-       struct p9_client *client = cq_context;
-       struct p9_trans_rdma *rdma = client->trans;
-       int ret;
-       struct ib_wc wc;
-
-       ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP);
-       while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
-               struct p9_rdma_context *c = (void *) (unsigned long) wc.wr_id;
-
-               switch (c->wc_op) {
-               case IB_WC_RECV:
-                       handle_recv(client, rdma, c, wc.status, wc.byte_len);
-                       up(&rdma->rq_sem);
-                       break;
-
-               case IB_WC_SEND:
-                       handle_send(client, rdma, c, wc.status, wc.byte_len);
-                       up(&rdma->sq_sem);
-                       break;
-
-               default:
-                       pr_err("unexpected completion type, c->wc_op=%d, wc.opcode=%d, status=%d\n",
-                              c->wc_op, wc.opcode, wc.status);
-                       break;
-               }
-               kfree(c);
-       }
-}
-
-static void cq_event_handler(struct ib_event *e, void *v)
-{
-       p9_debug(P9_DEBUG_ERROR, "CQ event %d context %p\n", e->event, v);
-}
-
 static void rdma_destroy_trans(struct p9_trans_rdma *rdma)
 {
        if (!rdma)
@@ -387,7 +364,7 @@ static void rdma_destroy_trans(struct p9_trans_rdma *rdma)
                ib_dealloc_pd(rdma->pd);
 
        if (rdma->cq && !IS_ERR(rdma->cq))
-               ib_destroy_cq(rdma->cq);
+               ib_free_cq(rdma->cq);
 
        if (rdma->cm_id && !IS_ERR(rdma->cm_id))
                rdma_destroy_id(rdma->cm_id);
@@ -408,13 +385,14 @@ post_recv(struct p9_client *client, struct p9_rdma_context *c)
        if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
                goto error;
 
+       c->cqe.done = recv_done;
+
        sge.addr = c->busa;
        sge.length = client->msize;
        sge.lkey = rdma->pd->local_dma_lkey;
 
        wr.next = NULL;
-       c->wc_op = IB_WC_RECV;
-       wr.wr_id = (unsigned long) c;
+       wr.wr_cqe = &c->cqe;
        wr.sg_list = &sge;
        wr.num_sge = 1;
        return ib_post_recv(rdma->qp, &wr, &bad_wr);
@@ -499,13 +477,14 @@ dont_need_post_recv:
                goto send_error;
        }
 
+       c->cqe.done = send_done;
+
        sge.addr = c->busa;
        sge.length = c->req->tc->size;
        sge.lkey = rdma->pd->local_dma_lkey;
 
        wr.next = NULL;
-       c->wc_op = IB_WC_SEND;
-       wr.wr_id = (unsigned long) c;
+       wr.wr_cqe = &c->cqe;
        wr.opcode = IB_WR_SEND;
        wr.send_flags = IB_SEND_SIGNALED;
        wr.sg_list = &sge;
@@ -642,7 +621,6 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
        struct p9_trans_rdma *rdma;
        struct rdma_conn_param conn_param;
        struct ib_qp_init_attr qp_attr;
-       struct ib_cq_init_attr cq_attr = {};
 
        /* Parse the transport specific mount options */
        err = parse_opts(args, &opts);
@@ -695,13 +673,11 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
                goto error;
 
        /* Create the Completion Queue */
-       cq_attr.cqe = opts.sq_depth + opts.rq_depth + 1;
-       rdma->cq = ib_create_cq(rdma->cm_id->device, cq_comp_handler,
-                               cq_event_handler, client,
-                               &cq_attr);
+       rdma->cq = ib_alloc_cq(rdma->cm_id->device, client,
+                       opts.sq_depth + opts.rq_depth + 1,
+                       0, IB_POLL_SOFTIRQ);
        if (IS_ERR(rdma->cq))
                goto error;
-       ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP);
 
        /* Create the Protection Domain */
        rdma->pd = ib_alloc_pd(rdma->cm_id->device);
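
The 9p conversion above is an instance of the new ib_cqe completion pattern: embed a struct ib_cqe in the per-WR context, point wr_cqe at it, and let the CQ allocated with ib_alloc_cq() dispatch each completion to the ->done callback. A condensed sketch of the same pattern with hypothetical names (my_ctx, my_done, my_post_recv are illustrative, not from the patch):

#include <linux/slab.h>
#include <rdma/ib_verbs.h>

struct my_ctx {
	struct ib_cqe cqe;	/* embedded completion entry */
	/* ... per-request state ... */
};

static void my_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct my_ctx *ctx = container_of(wc->wr_cqe, struct my_ctx, cqe);

	if (wc->status != IB_WC_SUCCESS)
		pr_err("recv WR failed: %d\n", wc->status);
	kfree(ctx);
}

static int my_post_recv(struct ib_qp *qp, struct my_ctx *ctx,
			struct ib_sge *sge)
{
	struct ib_recv_wr wr = { }, *bad_wr;

	ctx->cqe.done = my_done;	/* invoked by the core CQ poller */
	wr.wr_cqe = &ctx->cqe;
	wr.sg_list = sge;
	wr.num_sge = 1;
	return ib_post_recv(qp, &wr, &bad_wr);
}
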
index d735e854f916040912fb12930cbc6a7950ace942..4b6f3db9f8afb8589be7ec4363911d6770abae2c 100644 (file)
@@ -1387,6 +1387,8 @@ static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
        [IFLA_VF_RSS_QUERY_EN]  = { .len = sizeof(struct ifla_vf_rss_query_en) },
        [IFLA_VF_STATS]         = { .type = NLA_NESTED },
        [IFLA_VF_TRUST]         = { .len = sizeof(struct ifla_vf_trust) },
+       [IFLA_VF_IB_NODE_GUID]  = { .len = sizeof(struct ifla_vf_guid) },
+       [IFLA_VF_IB_PORT_GUID]  = { .len = sizeof(struct ifla_vf_guid) },
 };
 
 static const struct nla_policy ifla_vf_stats_policy[IFLA_VF_STATS_MAX + 1] = {
@@ -1534,6 +1536,22 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
        return 0;
 }
 
+static int handle_infiniband_guid(struct net_device *dev, struct ifla_vf_guid *ivt,
+                                 int guid_type)
+{
+       const struct net_device_ops *ops = dev->netdev_ops;
+
+       return ops->ndo_set_vf_guid(dev, ivt->vf, ivt->guid, guid_type);
+}
+
+static int handle_vf_guid(struct net_device *dev, struct ifla_vf_guid *ivt, int guid_type)
+{
+       if (dev->type != ARPHRD_INFINIBAND)
+               return -EOPNOTSUPP;
+
+       return handle_infiniband_guid(dev, ivt, guid_type);
+}
+
 static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
 {
        const struct net_device_ops *ops = dev->netdev_ops;
@@ -1636,6 +1654,24 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr **tb)
                        return err;
        }
 
+       if (tb[IFLA_VF_IB_NODE_GUID]) {
+               struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]);
+
+               if (!ops->ndo_set_vf_guid)
+                       return -EOPNOTSUPP;
+
+               return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID);
+       }
+
+       if (tb[IFLA_VF_IB_PORT_GUID]) {
+               struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]);
+
+               if (!ops->ndo_set_vf_guid)
+                       return -EOPNOTSUPP;
+
+               return handle_vf_guid(dev, ivt, IFLA_VF_IB_PORT_GUID);
+       }
+
        return err;
 }
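
The rtnetlink path above hands IFLA_VF_IB_NODE_GUID and IFLA_VF_IB_PORT_GUID requests to the netdev driver through the new ndo_set_vf_guid callback. A hypothetical sketch of a driver forwarding that request into the RDMA core helper added earlier in this series (mydrv_netdev_to_ibdev is an assumed helper, and port 1 is an arbitrary choice):

#include <linux/if_link.h>
#include <linux/netdevice.h>
#include <rdma/ib_verbs.h>

/* Illustrative glue: translate the netdev-level request into a call on
 * the associated IB device, which dispatches to its ->set_vf_guid() verb.
 */
static int mydrv_ndo_set_vf_guid(struct net_device *netdev, int vf,
				 u64 guid, int guid_type)
{
	struct ib_device *ibdev = mydrv_netdev_to_ibdev(netdev); /* assumed */
	u8 port = 1;

	if (guid_type != IFLA_VF_IB_NODE_GUID &&
	    guid_type != IFLA_VF_IB_PORT_GUID)
		return -EOPNOTSUPP;

	return ib_set_vf_guid(ibdev, vf, port, guid, guid_type);
}
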