git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge branches 'core', 'cxgb4', 'iser', 'mlx4', 'mlx5', 'ocrdma', 'odp', 'qib' and...
author    Roland Dreier <roland@purestorage.com>
Fri, 20 Feb 2015 17:04:40 +0000 (09:04 -0800)
committer Roland Dreier <roland@purestorage.com>
Fri, 20 Feb 2015 17:04:40 +0000 (09:04 -0800)
53 files changed:
MAINTAINERS
drivers/infiniband/core/ucma.c
drivers/infiniband/core/umem_odp.c
drivers/infiniband/core/uverbs.h
drivers/infiniband/core/uverbs_cmd.c
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/hw/cxgb4/ev.c
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
drivers/infiniband/hw/ipath/ipath_kernel.h
drivers/infiniband/hw/ipath/ipath_wc_ppc64.c
drivers/infiniband/hw/ipath/ipath_wc_x86_64.c
drivers/infiniband/hw/mlx4/cm.c
drivers/infiniband/hw/mlx4/cq.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx4/qp.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/hw/qib/qib.h
drivers/infiniband/hw/qib/qib_common.h
drivers/infiniband/hw/qib/qib_debugfs.c
drivers/infiniband/hw/qib/qib_diag.c
drivers/infiniband/hw/qib/qib_driver.c
drivers/infiniband/hw/qib/qib_eeprom.c
drivers/infiniband/hw/qib/qib_file_ops.c
drivers/infiniband/hw/qib/qib_fs.c
drivers/infiniband/hw/qib/qib_iba6120.c
drivers/infiniband/hw/qib/qib_iba7220.c
drivers/infiniband/hw/qib/qib_iba7322.c
drivers/infiniband/hw/qib/qib_init.c
drivers/infiniband/hw/qib/qib_intr.c
drivers/infiniband/hw/qib/qib_keys.c
drivers/infiniband/hw/qib/qib_mad.c
drivers/infiniband/hw/qib/qib_mmap.c
drivers/infiniband/hw/qib/qib_mr.c
drivers/infiniband/hw/qib/qib_pcie.c
drivers/infiniband/hw/qib/qib_qp.c
drivers/infiniband/hw/qib/qib_qsfp.c
drivers/infiniband/hw/qib/qib_rc.c
drivers/infiniband/hw/qib/qib_ruc.c
drivers/infiniband/hw/qib/qib_sd7220.c
drivers/infiniband/hw/qib/qib_sysfs.c
drivers/infiniband/hw/qib/qib_twsi.c
drivers/infiniband/hw/qib/qib_tx.c
drivers/infiniband/hw/qib/qib_ud.c
drivers/infiniband/hw/qib/qib_user_sdma.c
drivers/infiniband/hw/qib/qib_verbs.c
drivers/infiniband/hw/qib/qib_verbs_mcast.c
drivers/infiniband/hw/qib/qib_wc_x86_64.c
drivers/infiniband/ulp/iser/iscsi_iser.h
drivers/infiniband/ulp/iser/iser_initiator.c
drivers/infiniband/ulp/iser/iser_memory.c
drivers/infiniband/ulp/iser/iser_verbs.c
include/uapi/rdma/ib_user_verbs.h

diff --git a/MAINTAINERS b/MAINTAINERS
index d66a97dd3a12548e0f79c59154260724da11f282..d2357a1da410bc872e5040f17c24177ee6e71c59 100644
@@ -8450,7 +8450,7 @@ S:        Maintained
 F:     drivers/scsi/sr*
 
 SCSI RDMA PROTOCOL (SRP) INITIATOR
-M:     Bart Van Assche <bvanassche@acm.org>
+M:     Bart Van Assche <bart.vanassche@sandisk.com>
 L:     linux-rdma@vger.kernel.org
 S:     Supported
 W:     http://www.openfabrics.org
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 56a4b7ca7ee38ba118796d6d5e3eb770bbebc69a..45d67e9228d75b44d71354d248cf97140dfe37ad 100644
@@ -1124,6 +1124,9 @@ static int ucma_set_ib_path(struct ucma_context *ctx,
        if (!optlen)
                return -EINVAL;
 
+       memset(&sa_path, 0, sizeof(sa_path));
+       sa_path.vlan_id = 0xffff;
+
        ib_sa_unpack_path(path_data->path_rec, &sa_path);
        ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
        if (ret)
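
Note on the ucma.c hunk above: it zeroes the on-stack sa_path and presets vlan_id to 0xffff ("no VLAN") before ib_sa_unpack_path() runs, so fields that the packed path record does not carry are left with defined defaults instead of stack garbage. A minimal standalone C sketch of that zero-plus-sentinel-defaults pattern, with hypothetical types (path_rec, unpack_path):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct path_rec { uint16_t dlid; uint16_t vlan_id; };

/* hypothetical unpack: only dlid is present in the wire format */
static void unpack_path(const uint8_t *wire, struct path_rec *p)
{
	p->dlid = (uint16_t)((wire[0] << 8) | wire[1]);
}

int main(void)
{
	const uint8_t wire[2] = { 0x00, 0x2a };
	struct path_rec rec;

	memset(&rec, 0, sizeof(rec));
	rec.vlan_id = 0xffff;          /* sentinel: "no VLAN", not stack garbage */
	unpack_path(wire, &rec);
	printf("dlid=%u vlan_id=0x%x\n", (unsigned)rec.dlid, (unsigned)rec.vlan_id);
	return 0;
}
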
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 6095872549e79fb0dd9c0b3702c4eb01e974fb3f..8b8cc6fa0ab0c1ebf966b2ace4932807d9181bf1 100644
@@ -294,7 +294,8 @@ int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem)
        if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
                rbt_ib_umem_insert(&umem->odp_data->interval_tree,
                                   &context->umem_tree);
-       if (likely(!atomic_read(&context->notifier_count)))
+       if (likely(!atomic_read(&context->notifier_count)) ||
+           context->odp_mrs_count == 1)
                umem->odp_data->mn_counters_active = true;
        else
                list_add(&umem->odp_data->no_private_counters,
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 643c08a025a52d015431b8a27be1ddcacbd36845..b716b08156446e186c9ae608f3f4e6343c6f200f 100644
@@ -258,5 +258,6 @@ IB_UVERBS_DECLARE_CMD(close_xrcd);
 
 IB_UVERBS_DECLARE_EX_CMD(create_flow);
 IB_UVERBS_DECLARE_EX_CMD(destroy_flow);
+IB_UVERBS_DECLARE_EX_CMD(query_device);
 
 #endif /* UVERBS_H */
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index b7943ff16ed3f2edece8ec4cc3c7931594bd943a..a9f048990dfcd833de09978c0448ad979749e4c9 100644
@@ -400,6 +400,52 @@ err:
        return ret;
 }
 
+static void copy_query_dev_fields(struct ib_uverbs_file *file,
+                                 struct ib_uverbs_query_device_resp *resp,
+                                 struct ib_device_attr *attr)
+{
+       resp->fw_ver            = attr->fw_ver;
+       resp->node_guid         = file->device->ib_dev->node_guid;
+       resp->sys_image_guid    = attr->sys_image_guid;
+       resp->max_mr_size       = attr->max_mr_size;
+       resp->page_size_cap     = attr->page_size_cap;
+       resp->vendor_id         = attr->vendor_id;
+       resp->vendor_part_id    = attr->vendor_part_id;
+       resp->hw_ver            = attr->hw_ver;
+       resp->max_qp            = attr->max_qp;
+       resp->max_qp_wr         = attr->max_qp_wr;
+       resp->device_cap_flags  = attr->device_cap_flags;
+       resp->max_sge           = attr->max_sge;
+       resp->max_sge_rd        = attr->max_sge_rd;
+       resp->max_cq            = attr->max_cq;
+       resp->max_cqe           = attr->max_cqe;
+       resp->max_mr            = attr->max_mr;
+       resp->max_pd            = attr->max_pd;
+       resp->max_qp_rd_atom    = attr->max_qp_rd_atom;
+       resp->max_ee_rd_atom    = attr->max_ee_rd_atom;
+       resp->max_res_rd_atom   = attr->max_res_rd_atom;
+       resp->max_qp_init_rd_atom       = attr->max_qp_init_rd_atom;
+       resp->max_ee_init_rd_atom       = attr->max_ee_init_rd_atom;
+       resp->atomic_cap                = attr->atomic_cap;
+       resp->max_ee                    = attr->max_ee;
+       resp->max_rdd                   = attr->max_rdd;
+       resp->max_mw                    = attr->max_mw;
+       resp->max_raw_ipv6_qp           = attr->max_raw_ipv6_qp;
+       resp->max_raw_ethy_qp           = attr->max_raw_ethy_qp;
+       resp->max_mcast_grp             = attr->max_mcast_grp;
+       resp->max_mcast_qp_attach       = attr->max_mcast_qp_attach;
+       resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
+       resp->max_ah                    = attr->max_ah;
+       resp->max_fmr                   = attr->max_fmr;
+       resp->max_map_per_fmr           = attr->max_map_per_fmr;
+       resp->max_srq                   = attr->max_srq;
+       resp->max_srq_wr                = attr->max_srq_wr;
+       resp->max_srq_sge               = attr->max_srq_sge;
+       resp->max_pkeys                 = attr->max_pkeys;
+       resp->local_ca_ack_delay        = attr->local_ca_ack_delay;
+       resp->phys_port_cnt             = file->device->ib_dev->phys_port_cnt;
+}
+
 ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
                               const char __user *buf,
                               int in_len, int out_len)
@@ -420,47 +466,7 @@ ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
                return ret;
 
        memset(&resp, 0, sizeof resp);
-
-       resp.fw_ver                    = attr.fw_ver;
-       resp.node_guid                 = file->device->ib_dev->node_guid;
-       resp.sys_image_guid            = attr.sys_image_guid;
-       resp.max_mr_size               = attr.max_mr_size;
-       resp.page_size_cap             = attr.page_size_cap;
-       resp.vendor_id                 = attr.vendor_id;
-       resp.vendor_part_id            = attr.vendor_part_id;
-       resp.hw_ver                    = attr.hw_ver;
-       resp.max_qp                    = attr.max_qp;
-       resp.max_qp_wr                 = attr.max_qp_wr;
-       resp.device_cap_flags          = attr.device_cap_flags;
-       resp.max_sge                   = attr.max_sge;
-       resp.max_sge_rd                = attr.max_sge_rd;
-       resp.max_cq                    = attr.max_cq;
-       resp.max_cqe                   = attr.max_cqe;
-       resp.max_mr                    = attr.max_mr;
-       resp.max_pd                    = attr.max_pd;
-       resp.max_qp_rd_atom            = attr.max_qp_rd_atom;
-       resp.max_ee_rd_atom            = attr.max_ee_rd_atom;
-       resp.max_res_rd_atom           = attr.max_res_rd_atom;
-       resp.max_qp_init_rd_atom       = attr.max_qp_init_rd_atom;
-       resp.max_ee_init_rd_atom       = attr.max_ee_init_rd_atom;
-       resp.atomic_cap                = attr.atomic_cap;
-       resp.max_ee                    = attr.max_ee;
-       resp.max_rdd                   = attr.max_rdd;
-       resp.max_mw                    = attr.max_mw;
-       resp.max_raw_ipv6_qp           = attr.max_raw_ipv6_qp;
-       resp.max_raw_ethy_qp           = attr.max_raw_ethy_qp;
-       resp.max_mcast_grp             = attr.max_mcast_grp;
-       resp.max_mcast_qp_attach       = attr.max_mcast_qp_attach;
-       resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
-       resp.max_ah                    = attr.max_ah;
-       resp.max_fmr                   = attr.max_fmr;
-       resp.max_map_per_fmr           = attr.max_map_per_fmr;
-       resp.max_srq                   = attr.max_srq;
-       resp.max_srq_wr                = attr.max_srq_wr;
-       resp.max_srq_sge               = attr.max_srq_sge;
-       resp.max_pkeys                 = attr.max_pkeys;
-       resp.local_ca_ack_delay        = attr.local_ca_ack_delay;
-       resp.phys_port_cnt             = file->device->ib_dev->phys_port_cnt;
+       copy_query_dev_fields(file, &resp, &attr);
 
        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
@@ -2091,20 +2097,21 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
        if (qp->real_qp == qp) {
                ret = ib_resolve_eth_l2_attrs(qp, attr, &cmd.attr_mask);
                if (ret)
-                       goto out;
+                       goto release_qp;
                ret = qp->device->modify_qp(qp, attr,
                        modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
        } else {
                ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
        }
 
-       put_qp_read(qp);
-
        if (ret)
-               goto out;
+               goto release_qp;
 
        ret = in_len;
 
+release_qp:
+       put_qp_read(qp);
+
 out:
        kfree(attr);
 
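
Note on the ib_uverbs_modify_qp() reshuffle above: it is a goto-cleanup fix — once the QP read reference is taken, every failure path must drop it, so errors now funnel through the new release_qp label (which does put_qp_read()) before reaching the existing out: cleanup, instead of jumping past the put. A small illustrative C sketch of the pattern, with hypothetical names (step, refs):

#include <stdio.h>
#include <stdlib.h>

static int step(int ok) { return ok ? 0 : -1; }
static int refs;                            /* stands in for the QP read-reference */

static int modify(int ok1, int ok2)
{
	char *attr = malloc(16);
	int ret;

	if (!attr)
		return -1;

	refs++;                             /* "get_qp" */

	ret = step(ok1);
	if (ret)
		goto release;               /* the old code jumped straight to final cleanup here */

	ret = step(ok2);

release:
	refs--;                             /* "put_qp" now runs on every path after the get */
	free(attr);                         /* common cleanup (the driver's "out:") */
	return ret;
}

int main(void)
{
	modify(0, 1);                          /* fail the first step */
	printf("leaked references: %d\n", refs);   /* 0 with the fix, 1 without it */
	return 0;
}
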
@@ -3287,3 +3294,64 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
 
        return ret ? ret : in_len;
 }
+
+int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
+                             struct ib_udata *ucore,
+                             struct ib_udata *uhw)
+{
+       struct ib_uverbs_ex_query_device_resp resp;
+       struct ib_uverbs_ex_query_device  cmd;
+       struct ib_device_attr attr;
+       struct ib_device *device;
+       int err;
+
+       device = file->device->ib_dev;
+       if (ucore->inlen < sizeof(cmd))
+               return -EINVAL;
+
+       err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
+       if (err)
+               return err;
+
+       if (cmd.comp_mask)
+               return -EINVAL;
+
+       if (cmd.reserved)
+               return -EINVAL;
+
+       resp.response_length = offsetof(typeof(resp), odp_caps);
+
+       if (ucore->outlen < resp.response_length)
+               return -ENOSPC;
+
+       err = device->query_device(device, &attr);
+       if (err)
+               return err;
+
+       copy_query_dev_fields(file, &resp.base, &attr);
+       resp.comp_mask = 0;
+
+       if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
+               goto end;
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+       resp.odp_caps.general_caps = attr.odp_caps.general_caps;
+       resp.odp_caps.per_transport_caps.rc_odp_caps =
+               attr.odp_caps.per_transport_caps.rc_odp_caps;
+       resp.odp_caps.per_transport_caps.uc_odp_caps =
+               attr.odp_caps.per_transport_caps.uc_odp_caps;
+       resp.odp_caps.per_transport_caps.ud_odp_caps =
+               attr.odp_caps.per_transport_caps.ud_odp_caps;
+       resp.odp_caps.reserved = 0;
+#else
+       memset(&resp.odp_caps, 0, sizeof(resp.odp_caps));
+#endif
+       resp.response_length += sizeof(resp.odp_caps);
+
+end:
+       err = ib_copy_to_udata(ucore, &resp, resp.response_length);
+       if (err)
+               return err;
+
+       return 0;
+}
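
Note on ib_uverbs_ex_query_device() above: it follows the extended-verb compatibility scheme — response_length starts at offsetof(typeof(resp), odp_caps), and the trailing odp_caps section is filled in and counted only when the caller's output buffer (ucore->outlen) has room for it, so older userspace gets just the base response while newer userspace also receives the ODP capabilities. A standalone C sketch of that length-negotiation pattern, using hypothetical structs (base_resp, ext_resp):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct base_resp { uint32_t max_qp; uint32_t max_cq; };

struct ext_resp {
	struct base_resp base;
	uint32_t response_length;
	uint32_t odp_caps;              /* optional trailing section */
};

static size_t fill_resp(struct ext_resp *resp, size_t outlen)
{
	size_t len = offsetof(struct ext_resp, odp_caps);

	memset(resp, 0, sizeof(*resp));
	resp->base.max_qp = 64;
	resp->base.max_cq = 128;

	if (outlen >= len + sizeof(resp->odp_caps)) {
		resp->odp_caps = 0x3;   /* only filled for callers with room for it */
		len += sizeof(resp->odp_caps);
	}
	resp->response_length = (uint32_t)len;
	return len;                     /* copy only this many bytes to the caller */
}

int main(void)
{
	struct ext_resp r;

	printf("old caller gets %zu bytes\n",
	       fill_resp(&r, offsetof(struct ext_resp, odp_caps)));
	printf("new caller gets %zu bytes\n", fill_resp(&r, sizeof(r)));
	return 0;
}
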
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 5db1a8cc388da0c5de517bf69b3d8136b94a1bbf..259dcc7779f5e01bc95b66ca90e64d20f7c94087 100644
@@ -123,6 +123,7 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
                                    struct ib_udata *uhw) = {
        [IB_USER_VERBS_EX_CMD_CREATE_FLOW]      = ib_uverbs_ex_create_flow,
        [IB_USER_VERBS_EX_CMD_DESTROY_FLOW]     = ib_uverbs_ex_destroy_flow,
+       [IB_USER_VERBS_EX_CMD_QUERY_DEVICE]     = ib_uverbs_ex_query_device,
 };
 
 static void ib_uverbs_add_one(struct ib_device *device);
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index c9df0549f51dc0921eede052f8cf0753ce83e0df..4498a89f4cedffed855251540b2e89299227139c 100644
@@ -225,13 +225,20 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
        struct c4iw_cq *chp;
        unsigned long flag;
 
+       spin_lock_irqsave(&dev->lock, flag);
        chp = get_chp(dev, qid);
        if (chp) {
+               atomic_inc(&chp->refcnt);
+               spin_unlock_irqrestore(&dev->lock, flag);
                t4_clear_cq_armed(&chp->cq);
                spin_lock_irqsave(&chp->comp_handler_lock, flag);
                (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
                spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
-       } else
+               if (atomic_dec_and_test(&chp->refcnt))
+                       wake_up(&chp->wait);
+       } else {
                PDBG("%s unknown cqid 0x%x\n", __func__, qid);
+               spin_unlock_irqrestore(&dev->lock, flag);
+       }
        return 0;
 }
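
Note on the c4iw_ev_handler() change above: it closes a race with CQ teardown — the CQ is looked up while holding dev->lock and pinned with atomic_inc(&chp->refcnt) before the lock is dropped, and chp->wait is only woken once the completion handler releases that reference, so a concurrent destroy cannot free the CQ mid-handler. A rough pthreads sketch of the same lookup-pin-unlock shape, using a hypothetical object table and a mutex-protected count in place of atomics:

#include <pthread.h>
#include <stdio.h>

struct obj {
	int refcnt;                         /* protected by table_lock in this sketch */
	pthread_cond_t gone;                /* signalled when the last reference drops */
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj the_obj = { 1, PTHREAD_COND_INITIALIZER };

static struct obj *table_lookup(int id)
{
	return id == 0 ? &the_obj : NULL;   /* stand-in for get_chp() */
}

static void handle_event(int id)
{
	struct obj *o;

	pthread_mutex_lock(&table_lock);
	o = table_lookup(id);
	if (!o) {
		pthread_mutex_unlock(&table_lock);
		printf("unknown id %d\n", id);
		return;
	}
	o->refcnt++;                        /* pin the object before dropping the lock */
	pthread_mutex_unlock(&table_lock);

	printf("running completion handler for id %d\n", id);  /* object cannot vanish here */

	pthread_mutex_lock(&table_lock);
	if (--o->refcnt == 0)
		pthread_cond_signal(&o->gone);   /* let a waiting destroy proceed */
	pthread_mutex_unlock(&table_lock);
}

int main(void)
{
	handle_event(0);
	handle_event(1);
	return 0;
}
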
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index b5678ac97393ab94ba73b6b1f396ae801801c480..d87e1650f6437835f3660c21d3a59ec920fa8f7c 100644
@@ -196,7 +196,7 @@ static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
        return (int)(rdev->lldi.vr->stag.size >> 5);
 }
 
-#define C4IW_WR_TO (30*HZ)
+#define C4IW_WR_TO (60*HZ)
 
 struct c4iw_wr_wait {
        struct completion completion;
@@ -220,22 +220,21 @@ static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
                                 u32 hwtid, u32 qpid,
                                 const char *func)
 {
-       unsigned to = C4IW_WR_TO;
        int ret;
 
-       do {
-               ret = wait_for_completion_timeout(&wr_waitp->completion, to);
-               if (!ret) {
-                       printk(KERN_ERR MOD "%s - Device %s not responding - "
-                              "tid %u qpid %u\n", func,
-                              pci_name(rdev->lldi.pdev), hwtid, qpid);
-                       if (c4iw_fatal_error(rdev)) {
-                               wr_waitp->ret = -EIO;
-                               break;
-                       }
-                       to = to << 2;
-               }
-       } while (!ret);
+       if (c4iw_fatal_error(rdev)) {
+               wr_waitp->ret = -EIO;
+               goto out;
+       }
+
+       ret = wait_for_completion_timeout(&wr_waitp->completion, C4IW_WR_TO);
+       if (!ret) {
+               PDBG("%s - Device %s not responding (disabling device) - tid %u qpid %u\n",
+                    func, pci_name(rdev->lldi.pdev), hwtid, qpid);
+               rdev->flags |= T4_FATAL_ERROR;
+               wr_waitp->ret = -EIO;
+       }
+out:
        if (wr_waitp->ret)
                PDBG("%s: FW reply %d tid %u qpid %u\n",
                     pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h
index 6559af60bffd62fbf162320379ff545ca4c974fc..e08db7020cd4939809dd456882ff3d4e69480480 100644
@@ -908,9 +908,6 @@ void ipath_chip_cleanup(struct ipath_devdata *);
 /* clean up any chip type-specific stuff */
 void ipath_chip_done(void);
 
-/* check to see if we have to force ordering for write combining */
-int ipath_unordered_wc(void);
-
 void ipath_disarm_piobufs(struct ipath_devdata *, unsigned first,
                          unsigned cnt);
 void ipath_cancel_sends(struct ipath_devdata *, int);
diff --git a/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c b/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c
index 1d7bd82a1fb1fa3ffe8c4cebf08a3cd1b57529ce..1a7e20a75149ce6a6bab980b466585784595c0f1 100644
@@ -47,16 +47,3 @@ int ipath_enable_wc(struct ipath_devdata *dd)
 {
        return 0;
 }
-
-/**
- * ipath_unordered_wc - indicate whether write combining is unordered
- *
- * Because our performance depends on our ability to do write
- * combining mmio writes in the most efficient way, we need to
- * know if we are on a processor that may reorder stores when
- * write combining.
- */
-int ipath_unordered_wc(void)
-{
-       return 1;
-}
diff --git a/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c b/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c
index 3428acb0868c202383304105546fc023d3a0917e..4ad0b932df1fab1c1897f144db9ffc8af35c5f73 100644
@@ -167,18 +167,3 @@ void ipath_disable_wc(struct ipath_devdata *dd)
                dd->ipath_wc_cookie = 0; /* even on failure */
        }
 }
-
-/**
- * ipath_unordered_wc - indicate whether write combining is ordered
- *
- * Because our performance depends on our ability to do write combining mmio
- * writes in the most efficient way, we need to know if we are on an Intel
- * or AMD x86_64 processor.  AMD x86_64 processors flush WC buffers out in
- * the order completed, and so no special flushing is required to get
- * correct ordering.  Intel processors, however, will flush write buffers
- * out in "random" orders, and so explicit ordering is needed at times.
- */
-int ipath_unordered_wc(void)
-{
-       return boot_cpu_data.x86_vendor != X86_VENDOR_AMD;
-}
diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
index 56a593e0ae5d1f537db0f3615ee29eb99b0defc2..39a488889fc7a9981213b25567f051dc772bc510 100644
@@ -372,7 +372,7 @@ int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
                *slave = mlx4_ib_find_real_gid(ibdev, port, gid.global.interface_id);
                if (*slave < 0) {
                        mlx4_ib_warn(ibdev, "failed matching slave_id by gid (0x%llx)\n",
-                                       gid.global.interface_id);
+                                    be64_to_cpu(gid.global.interface_id));
                        return -ENOENT;
                }
                return 0;
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index a3b70f6c4035b2f9e106941e057d36258019b7c1..cb63ecd2276f4c47675eb69287314e85a424df35 100644
@@ -367,8 +367,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
        int err;
 
        mutex_lock(&cq->resize_mutex);
-
-       if (entries < 1) {
+       if (entries < 1 || entries > dev->dev->caps.max_cqes) {
                err = -EINVAL;
                goto out;
        }
@@ -379,7 +378,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
                goto out;
        }
 
-       if (entries > dev->dev->caps.max_cqes) {
+       if (entries > dev->dev->caps.max_cqes + 1) {
                err = -EINVAL;
                goto out;
        }
@@ -392,7 +391,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
                /* Can't be smaller than the number of outstanding CQEs */
                outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
                if (entries < outst_cqe + 1) {
-                       err = 0;
+                       err = -EINVAL;
                        goto out;
                }
 
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 9117b7a2d5f8beeddebb367e421bbf7a8ca81807..0b280b1c98dfb8c4bfa5eab04d202dda5899205f 100644
@@ -1222,8 +1222,7 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
        struct mlx4_ib_qp *mqp = to_mqp(ibqp);
        u64 reg_id;
        struct mlx4_ib_steering *ib_steering = NULL;
-       enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
-               MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
+       enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
 
        if (mdev->dev->caps.steering_mode ==
            MLX4_STEERING_MODE_DEVICE_MANAGED) {
@@ -1236,8 +1235,10 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
                                    !!(mqp->flags &
                                       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
                                    prot, &reg_id);
-       if (err)
+       if (err) {
+               pr_err("multicast attach op failed, err %d\n", err);
                goto err_malloc;
+       }
 
        err = add_gid_entry(ibqp, gid);
        if (err)
@@ -1285,8 +1286,7 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
        struct net_device *ndev;
        struct mlx4_ib_gid_entry *ge;
        u64 reg_id = 0;
-       enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
-               MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
+       enum mlx4_protocol prot =  MLX4_PROT_IB_IPV6;
 
        if (mdev->dev->caps.steering_mode ==
            MLX4_STEERING_MODE_DEVICE_MANAGED) {
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index cf000b7ad64f9b11698ee8dbfa2aec9883e886db..c880329b4d64691e3596490b2c18ad67c6ab4ef9 100644
@@ -1674,8 +1674,10 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
                            qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI ||
                            qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) {
                                err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context);
-                               if (err)
-                                       return -EINVAL;
+                               if (err) {
+                                       err = -EINVAL;
+                                       goto out;
+                               }
                                if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
                                        dev->qp1_proxy[qp->port - 1] = qp;
                        }
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 03bf81211a5401c366522c68213ecf27cdf4b326..cc4ac1e583b29725af01e03e40bebaee758c6e01 100644
@@ -997,7 +997,7 @@ static int get_port_caps(struct mlx5_ib_dev *dev)
        struct ib_device_attr *dprops = NULL;
        struct ib_port_attr *pprops = NULL;
        struct mlx5_general_caps *gen;
-       int err = 0;
+       int err = -ENOMEM;
        int port;
 
        gen = &dev->mdev->caps.gen;
@@ -1331,6 +1331,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
                (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)         |
                (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)         |
                (1ull << IB_USER_VERBS_CMD_OPEN_QP);
+       dev->ib_dev.uverbs_ex_cmd_mask =
+               (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE);
 
        dev->ib_dev.query_device        = mlx5_ib_query_device;
        dev->ib_dev.query_port          = mlx5_ib_query_port;
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 32a28bd50b20ae08c41a9a7086b72045f7934c3d..cd9822eeacae3f1ab138731ea9c6a67963974bc2 100644
@@ -1012,6 +1012,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
                goto err_2;
        }
        mr->umem = umem;
+       mr->dev = dev;
        mr->live = 1;
        kvfree(in);
 
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index c00ae093b6f881870867b8dac16af33efca4091b..ffd48bfc4923457e5383345acfa3620fa5f6a52f 100644
@@ -1082,12 +1082,6 @@ struct qib_devdata {
        /* control high-level access to EEPROM */
        struct mutex eep_lock;
        uint64_t traffic_wds;
-       /* active time is kept in seconds, but logged in hours */
-       atomic_t active_time;
-       /* Below are nominal shadow of EEPROM, new since last EEPROM update */
-       uint8_t eep_st_errs[QIB_EEP_LOG_CNT];
-       uint8_t eep_st_new_errs[QIB_EEP_LOG_CNT];
-       uint16_t eep_hrs;
        /*
         * masks for which bits of errs, hwerrs that cause
         * each of the counters to increment.
@@ -1309,8 +1303,7 @@ int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr, void *buffer,
 int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
                    const void *buffer, int len);
 void qib_get_eeprom_info(struct qib_devdata *);
-int qib_update_eeprom_log(struct qib_devdata *dd);
-void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr);
+#define qib_inc_eeprom_err(dd, eidx, incr)
 void qib_dump_lookup_output_queue(struct qib_devdata *);
 void qib_force_pio_avail_update(struct qib_devdata *);
 void qib_clear_symerror_on_linkup(unsigned long opaque);
@@ -1467,11 +1460,14 @@ const char *qib_get_unit_name(int unit);
  * Flush write combining store buffers (if present) and perform a write
  * barrier.
  */
+static inline void qib_flush_wc(void)
+{
 #if defined(CONFIG_X86_64)
-#define qib_flush_wc() asm volatile("sfence" : : : "memory")
+       asm volatile("sfence" : : : "memory");
 #else
-#define qib_flush_wc() wmb() /* no reorder around wc flush */
+       wmb(); /* no reorder around wc flush */
 #endif
+}
 
 /* global module parameter variables */
 extern unsigned qib_ibmtu;
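
Note on the qib.h hunk above: it converts qib_flush_wc() from per-arch #define variants into a single static inline whose body is chosen by the preprocessor, keeping the x86_64 sfence fast path while giving callers ordinary function semantics and type checking. A compile-able userspace sketch of the same shape, using __x86_64__ and a builtin full barrier as stand-ins for CONFIG_X86_64 and wmb():

#include <stdio.h>

/* same shape as qib_flush_wc() after the change: one inline, arch-specific body */
static inline void flush_wc(void)
{
#if defined(__x86_64__)
	__asm__ __volatile__("sfence" : : : "memory");  /* drain WC buffers, order stores */
#else
	__sync_synchronize();                 /* full barrier as a userspace stand-in for wmb() */
#endif
}

int main(void)
{
	flush_wc();
	puts("write-combining flush issued");
	return 0;
}
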
diff --git a/drivers/infiniband/hw/qib/qib_common.h b/drivers/infiniband/hw/qib/qib_common.h
index 5670ace27c639adb351b9928c65ed081a599a910..4fb78abd8ba1ad69629a9752878d4aa30a4f2229 100644
@@ -257,7 +257,7 @@ struct qib_base_info {
 
        /* shared memory page for send buffer disarm status */
        __u64 spi_sendbuf_status;
-} __attribute__ ((aligned(8)));
+} __aligned(8);
 
 /*
  * This version number is given to the driver by the user code during
@@ -361,7 +361,7 @@ struct qib_user_info {
         */
        __u64 spu_base_info;
 
-} __attribute__ ((aligned(8)));
+} __aligned(8);
 
 /* User commands. */
 
diff --git a/drivers/infiniband/hw/qib/qib_debugfs.c b/drivers/infiniband/hw/qib/qib_debugfs.c
index 6abd3ed3cd51ecf2c0a21927b0cf4894f7c48941..5e75b43c596b608adfacfeb48ea955ffa914082d 100644
@@ -255,7 +255,6 @@ void qib_dbg_ibdev_init(struct qib_ibdev *ibd)
        DEBUGFS_FILE_CREATE(opcode_stats);
        DEBUGFS_FILE_CREATE(ctx_stats);
        DEBUGFS_FILE_CREATE(qp_stats);
-       return;
 }
 
 void qib_dbg_ibdev_exit(struct qib_ibdev *ibd)
diff --git a/drivers/infiniband/hw/qib/qib_diag.c b/drivers/infiniband/hw/qib/qib_diag.c
index 5dfda4c5cc9c3b1fde36d02c55cd387b7e366542..8c34b23e5bf670b92b3d1cec4d7b9c9e93ba9a20 100644
@@ -85,7 +85,7 @@ static struct qib_diag_client *get_client(struct qib_devdata *dd)
                client_pool = dc->next;
        else
                /* None in pool, alloc and init */
-               dc = kmalloc(sizeof *dc, GFP_KERNEL);
+               dc = kmalloc(sizeof(*dc), GFP_KERNEL);
 
        if (dc) {
                dc->next = NULL;
@@ -257,6 +257,7 @@ static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset,
        if (dd->userbase) {
                /* If user regs mapped, they are after send, so set limit. */
                u32 ulim = (dd->cfgctxts * dd->ureg_align) + dd->uregbase;
+
                if (!dd->piovl15base)
                        snd_lim = dd->uregbase;
                krb32 = (u32 __iomem *)dd->userbase;
@@ -280,6 +281,7 @@ static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset,
        snd_bottom = dd->pio2k_bufbase;
        if (snd_lim == 0) {
                u32 tot2k = dd->piobcnt2k * ALIGN(dd->piosize2k, dd->palign);
+
                snd_lim = snd_bottom + tot2k;
        }
        /* If 4k buffers exist, account for them by bumping
@@ -398,6 +400,7 @@ static int qib_write_umem64(struct qib_devdata *dd, u32 regoffs,
        /* not very efficient, but it works for now */
        while (reg_addr < reg_end) {
                u64 data;
+
                if (copy_from_user(&data, uaddr, sizeof(data))) {
                        ret = -EFAULT;
                        goto bail;
@@ -698,7 +701,7 @@ int qib_register_observer(struct qib_devdata *dd,
 
        if (!dd || !op)
                return -EINVAL;
-       olp = vmalloc(sizeof *olp);
+       olp = vmalloc(sizeof(*olp));
        if (!olp) {
                pr_err("vmalloc for observer failed\n");
                return -ENOMEM;
@@ -796,6 +799,7 @@ static ssize_t qib_diag_read(struct file *fp, char __user *data,
                op = diag_get_observer(dd, *off);
                if (op) {
                        u32 offset = *off;
+
                        ret = op->hook(dd, op, offset, &data64, 0, use_32);
                }
                /*
@@ -873,6 +877,7 @@ static ssize_t qib_diag_write(struct file *fp, const char __user *data,
                if (count == 4 || count == 8) {
                        u64 data64;
                        u32 offset = *off;
+
                        ret = copy_from_user(&data64, data, count);
                        if (ret) {
                                ret = -EFAULT;
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index 5bee08f16d7438d2eba1668bdccee64265c9972d..f58fdc3d25a29a71909a22c772217c62a975b785 100644
@@ -86,7 +86,7 @@ const char *qib_get_unit_name(int unit)
 {
        static char iname[16];
 
-       snprintf(iname, sizeof iname, "infinipath%u", unit);
+       snprintf(iname, sizeof(iname), "infinipath%u", unit);
        return iname;
 }
 
@@ -349,6 +349,7 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
                qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
                if (qp_num != QIB_MULTICAST_QPN) {
                        int ruc_res;
+
                        qp = qib_lookup_qpn(ibp, qp_num);
                        if (!qp)
                                goto drop;
@@ -461,6 +462,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
        rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
        if (dd->flags & QIB_NODMA_RTAIL) {
                u32 seq = qib_hdrget_seq(rhf_addr);
+
                if (seq != rcd->seq_cnt)
                        goto bail;
                hdrqtail = 0;
@@ -651,6 +653,7 @@ bail:
 int qib_set_lid(struct qib_pportdata *ppd, u32 lid, u8 lmc)
 {
        struct qib_devdata *dd = ppd->dd;
+
        ppd->lid = lid;
        ppd->lmc = lmc;
 
diff --git a/drivers/infiniband/hw/qib/qib_eeprom.c b/drivers/infiniband/hw/qib/qib_eeprom.c
index 4d5d71aaa2b4e53e319b3c9cc57be37291f3cdd7..311ee6c3dd5e8b2e39382ef1c5d2741040a47636 100644
@@ -153,6 +153,7 @@ void qib_get_eeprom_info(struct qib_devdata *dd)
 
        if (t && dd0->nguid > 1 && t <= dd0->nguid) {
                u8 oguid;
+
                dd->base_guid = dd0->base_guid;
                bguid = (u8 *) &dd->base_guid;
 
@@ -251,206 +252,25 @@ void qib_get_eeprom_info(struct qib_devdata *dd)
                 * This board has a Serial-prefix, which is stored
                 * elsewhere for backward-compatibility.
                 */
-               memcpy(snp, ifp->if_sprefix, sizeof ifp->if_sprefix);
-               snp[sizeof ifp->if_sprefix] = '\0';
+               memcpy(snp, ifp->if_sprefix, sizeof(ifp->if_sprefix));
+               snp[sizeof(ifp->if_sprefix)] = '\0';
                len = strlen(snp);
                snp += len;
-               len = (sizeof dd->serial) - len;
-               if (len > sizeof ifp->if_serial)
-                       len = sizeof ifp->if_serial;
+               len = sizeof(dd->serial) - len;
+               if (len > sizeof(ifp->if_serial))
+                       len = sizeof(ifp->if_serial);
                memcpy(snp, ifp->if_serial, len);
-       } else
-               memcpy(dd->serial, ifp->if_serial,
-                      sizeof ifp->if_serial);
+       } else {
+               memcpy(dd->serial, ifp->if_serial, sizeof(ifp->if_serial));
+       }
        if (!strstr(ifp->if_comment, "Tested successfully"))
                qib_dev_err(dd,
                        "Board SN %s did not pass functional test: %s\n",
                        dd->serial, ifp->if_comment);
 
-       memcpy(&dd->eep_st_errs, &ifp->if_errcntp, QIB_EEP_LOG_CNT);
-       /*
-        * Power-on (actually "active") hours are kept as little-endian value
-        * in EEPROM, but as seconds in a (possibly as small as 24-bit)
-        * atomic_t while running.
-        */
-       atomic_set(&dd->active_time, 0);
-       dd->eep_hrs = ifp->if_powerhour[0] | (ifp->if_powerhour[1] << 8);
-
 done:
        vfree(buf);
 
 bail:;
 }
 
-/**
- * qib_update_eeprom_log - copy active-time and error counters to eeprom
- * @dd: the qlogic_ib device
- *
- * Although the time is kept as seconds in the qib_devdata struct, it is
- * rounded to hours for re-write, as we have only 16 bits in EEPROM.
- * First-cut code reads whole (expected) struct qib_flash, modifies,
- * re-writes. Future direction: read/write only what we need, assuming
- * that the EEPROM had to have been "good enough" for driver init, and
- * if not, we aren't making it worse.
- *
- */
-int qib_update_eeprom_log(struct qib_devdata *dd)
-{
-       void *buf;
-       struct qib_flash *ifp;
-       int len, hi_water;
-       uint32_t new_time, new_hrs;
-       u8 csum;
-       int ret, idx;
-       unsigned long flags;
-
-       /* first, check if we actually need to do anything. */
-       ret = 0;
-       for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
-               if (dd->eep_st_new_errs[idx]) {
-                       ret = 1;
-                       break;
-               }
-       }
-       new_time = atomic_read(&dd->active_time);
-
-       if (ret == 0 && new_time < 3600)
-               goto bail;
-
-       /*
-        * The quick-check above determined that there is something worthy
-        * of logging, so get current contents and do a more detailed idea.
-        * read full flash, not just currently used part, since it may have
-        * been written with a newer definition
-        */
-       len = sizeof(struct qib_flash);
-       buf = vmalloc(len);
-       ret = 1;
-       if (!buf) {
-               qib_dev_err(dd,
-                       "Couldn't allocate memory to read %u bytes from eeprom for logging\n",
-                       len);
-               goto bail;
-       }
-
-       /* Grab semaphore and read current EEPROM. If we get an
-        * error, let go, but if not, keep it until we finish write.
-        */
-       ret = mutex_lock_interruptible(&dd->eep_lock);
-       if (ret) {
-               qib_dev_err(dd, "Unable to acquire EEPROM for logging\n");
-               goto free_bail;
-       }
-       ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev, 0, buf, len);
-       if (ret) {
-               mutex_unlock(&dd->eep_lock);
-               qib_dev_err(dd, "Unable read EEPROM for logging\n");
-               goto free_bail;
-       }
-       ifp = (struct qib_flash *)buf;
-
-       csum = flash_csum(ifp, 0);
-       if (csum != ifp->if_csum) {
-               mutex_unlock(&dd->eep_lock);
-               qib_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n",
-                           csum, ifp->if_csum);
-               ret = 1;
-               goto free_bail;
-       }
-       hi_water = 0;
-       spin_lock_irqsave(&dd->eep_st_lock, flags);
-       for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
-               int new_val = dd->eep_st_new_errs[idx];
-               if (new_val) {
-                       /*
-                        * If we have seen any errors, add to EEPROM values
-                        * We need to saturate at 0xFF (255) and we also
-                        * would need to adjust the checksum if we were
-                        * trying to minimize EEPROM traffic
-                        * Note that we add to actual current count in EEPROM,
-                        * in case it was altered while we were running.
-                        */
-                       new_val += ifp->if_errcntp[idx];
-                       if (new_val > 0xFF)
-                               new_val = 0xFF;
-                       if (ifp->if_errcntp[idx] != new_val) {
-                               ifp->if_errcntp[idx] = new_val;
-                               hi_water = offsetof(struct qib_flash,
-                                                   if_errcntp) + idx;
-                       }
-                       /*
-                        * update our shadow (used to minimize EEPROM
-                        * traffic), to match what we are about to write.
-                        */
-                       dd->eep_st_errs[idx] = new_val;
-                       dd->eep_st_new_errs[idx] = 0;
-               }
-       }
-       /*
-        * Now update active-time. We would like to round to the nearest hour
-        * but unless atomic_t are sure to be proper signed ints we cannot,
-        * because we need to account for what we "transfer" to EEPROM and
-        * if we log an hour at 31 minutes, then we would need to set
-        * active_time to -29 to accurately count the _next_ hour.
-        */
-       if (new_time >= 3600) {
-               new_hrs = new_time / 3600;
-               atomic_sub((new_hrs * 3600), &dd->active_time);
-               new_hrs += dd->eep_hrs;
-               if (new_hrs > 0xFFFF)
-                       new_hrs = 0xFFFF;
-               dd->eep_hrs = new_hrs;
-               if ((new_hrs & 0xFF) != ifp->if_powerhour[0]) {
-                       ifp->if_powerhour[0] = new_hrs & 0xFF;
-                       hi_water = offsetof(struct qib_flash, if_powerhour);
-               }
-               if ((new_hrs >> 8) != ifp->if_powerhour[1]) {
-                       ifp->if_powerhour[1] = new_hrs >> 8;
-                       hi_water = offsetof(struct qib_flash, if_powerhour) + 1;
-               }
-       }
-       /*
-        * There is a tiny possibility that we could somehow fail to write
-        * the EEPROM after updating our shadows, but problems from holding
-        * the spinlock too long are a much bigger issue.
-        */
-       spin_unlock_irqrestore(&dd->eep_st_lock, flags);
-       if (hi_water) {
-               /* we made some change to the data, uopdate cksum and write */
-               csum = flash_csum(ifp, 1);
-               ret = eeprom_write_with_enable(dd, 0, buf, hi_water + 1);
-       }
-       mutex_unlock(&dd->eep_lock);
-       if (ret)
-               qib_dev_err(dd, "Failed updating EEPROM\n");
-
-free_bail:
-       vfree(buf);
-bail:
-       return ret;
-}
-
-/**
- * qib_inc_eeprom_err - increment one of the four error counters
- * that are logged to EEPROM.
- * @dd: the qlogic_ib device
- * @eidx: 0..3, the counter to increment
- * @incr: how much to add
- *
- * Each counter is 8-bits, and saturates at 255 (0xFF). They
- * are copied to the EEPROM (aka flash) whenever qib_update_eeprom_log()
- * is called, but it can only be called in a context that allows sleep.
- * This function can be called even at interrupt level.
- */
-void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr)
-{
-       uint new_val;
-       unsigned long flags;
-
-       spin_lock_irqsave(&dd->eep_st_lock, flags);
-       new_val = dd->eep_st_new_errs[eidx] + incr;
-       if (new_val > 255)
-               new_val = 255;
-       dd->eep_st_new_errs[eidx] = new_val;
-       spin_unlock_irqrestore(&dd->eep_st_lock, flags);
-}
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index b15e34eeef685d510c781d25d08433e4b3e7c717..41937c6f888af13deadb6c7b25678cfc34596cf8 100644
@@ -351,9 +351,10 @@ static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
                 * unless perhaps the user has mpin'ed the pages
                 * themselves.
                 */
-               qib_devinfo(dd->pcidev,
-                        "Failed to lock addr %p, %u pages: "
-                        "errno %d\n", (void *) vaddr, cnt, -ret);
+               qib_devinfo(
+                       dd->pcidev,
+                       "Failed to lock addr %p, %u pages: errno %d\n",
+                       (void *) vaddr, cnt, -ret);
                goto done;
        }
        for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
@@ -437,7 +438,7 @@ cleanup:
                        goto cleanup;
                }
                if (copy_to_user((void __user *) (unsigned long) ti->tidmap,
-                                tidmap, sizeof tidmap)) {
+                                tidmap, sizeof(tidmap))) {
                        ret = -EFAULT;
                        goto cleanup;
                }
@@ -484,7 +485,7 @@ static int qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt,
        }
 
        if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap,
-                          sizeof tidmap)) {
+                          sizeof(tidmap))) {
                ret = -EFAULT;
                goto done;
        }
@@ -951,8 +952,8 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
                /* rcvegrbufs are read-only on the slave */
                if (vma->vm_flags & VM_WRITE) {
                        qib_devinfo(dd->pcidev,
-                                "Can't map eager buffers as "
-                                "writable (flags=%lx)\n", vma->vm_flags);
+                                "Can't map eager buffers as writable (flags=%lx)\n",
+                                vma->vm_flags);
                        ret = -EPERM;
                        goto bail;
                }
@@ -1185,6 +1186,7 @@ static void assign_ctxt_affinity(struct file *fp, struct qib_devdata *dd)
         */
        if (weight >= qib_cpulist_count) {
                int cpu;
+
                cpu = find_first_zero_bit(qib_cpulist,
                                          qib_cpulist_count);
                if (cpu == qib_cpulist_count)
@@ -1247,10 +1249,7 @@ static int init_subctxts(struct qib_devdata *dd,
        if (!qib_compatible_subctxts(uinfo->spu_userversion >> 16,
                uinfo->spu_userversion & 0xffff)) {
                qib_devinfo(dd->pcidev,
-                        "Mismatched user version (%d.%d) and driver "
-                        "version (%d.%d) while context sharing. Ensure "
-                        "that driver and library are from the same "
-                        "release.\n",
+                        "Mismatched user version (%d.%d) and driver version (%d.%d) while context sharing. Ensure that driver and library are from the same release.\n",
                         (int) (uinfo->spu_userversion >> 16),
                         (int) (uinfo->spu_userversion & 0xffff),
                         QIB_USER_SWMAJOR, QIB_USER_SWMINOR);
@@ -1391,6 +1390,7 @@ static int choose_port_ctxt(struct file *fp, struct qib_devdata *dd, u32 port,
        }
        if (!ppd) {
                u32 pidx = ctxt % dd->num_pports;
+
                if (usable(dd->pport + pidx))
                        ppd = dd->pport + pidx;
                else {
@@ -1438,10 +1438,12 @@ static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
 
        if (alg == QIB_PORT_ALG_ACROSS) {
                unsigned inuse = ~0U;
+
                /* find device (with ACTIVE ports) with fewest ctxts in use */
                for (ndev = 0; ndev < devmax; ndev++) {
                        struct qib_devdata *dd = qib_lookup(ndev);
                        unsigned cused = 0, cfree = 0, pusable = 0;
+
                        if (!dd)
                                continue;
                        if (port && port <= dd->num_pports &&
@@ -1471,6 +1473,7 @@ static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
        } else {
                for (ndev = 0; ndev < devmax; ndev++) {
                        struct qib_devdata *dd = qib_lookup(ndev);
+
                        if (dd) {
                                ret = choose_port_ctxt(fp, dd, port, uinfo);
                                if (!ret)
@@ -1556,6 +1559,7 @@ static int find_hca(unsigned int cpu, int *unit)
        }
        for (ndev = 0; ndev < devmax; ndev++) {
                struct qib_devdata *dd = qib_lookup(ndev);
+
                if (dd) {
                        if (pcibus_to_node(dd->pcidev->bus) < 0) {
                                ret = -EINVAL;
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index 81854586c081fee37e0b3933a26d08e5e8b4e815..55f240a363fe04801bc39b38e41670f0db19b86e 100644
@@ -106,7 +106,7 @@ static ssize_t driver_stats_read(struct file *file, char __user *buf,
 {
        qib_stats.sps_ints = qib_sps_ints();
        return simple_read_from_buffer(buf, count, ppos, &qib_stats,
-                                      sizeof qib_stats);
+                                      sizeof(qib_stats));
 }
 
 /*
@@ -133,7 +133,7 @@ static ssize_t driver_names_read(struct file *file, char __user *buf,
                                 size_t count, loff_t *ppos)
 {
        return simple_read_from_buffer(buf, count, ppos, qib_statnames,
-               sizeof qib_statnames - 1); /* no null */
+               sizeof(qib_statnames) - 1); /* no null */
 }
 
 static const struct file_operations driver_ops[] = {
@@ -379,7 +379,7 @@ static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd)
        int ret, i;
 
        /* create the per-unit directory */
-       snprintf(unit, sizeof unit, "%u", dd->unit);
+       snprintf(unit, sizeof(unit), "%u", dd->unit);
        ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir,
                          &simple_dir_operations, dd);
        if (ret) {
@@ -482,7 +482,7 @@ static int remove_device_files(struct super_block *sb,
 
        root = dget(sb->s_root);
        mutex_lock(&root->d_inode->i_mutex);
-       snprintf(unit, sizeof unit, "%u", dd->unit);
+       snprintf(unit, sizeof(unit), "%u", dd->unit);
        dir = lookup_one_len(unit, root, strlen(unit));
 
        if (IS_ERR(dir)) {
@@ -560,6 +560,7 @@ static struct dentry *qibfs_mount(struct file_system_type *fs_type, int flags,
                        const char *dev_name, void *data)
 {
        struct dentry *ret;
+
        ret = mount_single(fs_type, flags, data, qibfs_fill_super);
        if (!IS_ERR(ret))
                qib_super = ret->d_sb;
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index d68266ac7619b896c49e500256c263ec1c3619c1..0d2ba59af30af66bce01ef8132c8182cc6e44a33 100644
@@ -333,6 +333,7 @@ static inline void qib_write_ureg(const struct qib_devdata *dd,
                                  enum qib_ureg regno, u64 value, int ctxt)
 {
        u64 __iomem *ubase;
+
        if (dd->userbase)
                ubase = (u64 __iomem *)
                        ((char __iomem *) dd->userbase +
@@ -834,14 +835,14 @@ static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg,
                bits = (u32) ((hwerrs >>
                               QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &
                              QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);
-               snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
+               snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
                         "[PCIe Mem Parity Errs %x] ", bits);
                strlcat(msg, bitsmsg, msgl);
        }
 
        if (hwerrs & _QIB_PLL_FAIL) {
                isfatal = 1;
-               snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
+               snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
                         "[PLL failed (%llx), InfiniPath hardware unusable]",
                         (unsigned long long) hwerrs & _QIB_PLL_FAIL);
                strlcat(msg, bitsmsg, msgl);
@@ -1014,7 +1015,7 @@ static void handle_6120_errors(struct qib_devdata *dd, u64 errs)
 
        /* do these first, they are most important */
        if (errs & ERR_MASK(HardwareErr))
-               qib_handle_6120_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
+               qib_handle_6120_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
        else
                for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
                        if (errs & dd->eep_st_masks[log_idx].errs_to_log)
@@ -1062,7 +1063,7 @@ static void handle_6120_errors(struct qib_devdata *dd, u64 errs)
         */
        mask = ERR_MASK(IBStatusChanged) | ERR_MASK(RcvEgrFullErr) |
                ERR_MASK(RcvHdrFullErr) | ERR_MASK(HardwareErr);
-       qib_decode_6120_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask);
+       qib_decode_6120_err(dd, msg, sizeof(dd->cspec->emsgbuf), errs & ~mask);
 
        if (errs & E_SUM_PKTERRS)
                qib_stats.sps_rcverrs++;
@@ -1670,6 +1671,7 @@ static irqreturn_t qib_6120intr(int irq, void *data)
                }
                if (crcs) {
                        u32 cntr = dd->cspec->lli_counter;
+
                        cntr += crcs;
                        if (cntr) {
                                if (cntr > dd->cspec->lli_thresh) {
@@ -1722,6 +1724,7 @@ static void qib_setup_6120_interrupt(struct qib_devdata *dd)
                        "irq is 0, BIOS error?  Interrupts won't work\n");
        else {
                int ret;
+
                ret = request_irq(dd->cspec->irq, qib_6120intr, 0,
                                  QIB_DRV_NAME, dd);
                if (ret)
@@ -2681,8 +2684,6 @@ static void qib_get_6120_faststats(unsigned long opaque)
        spin_lock_irqsave(&dd->eep_st_lock, flags);
        traffic_wds -= dd->traffic_wds;
        dd->traffic_wds += traffic_wds;
-       if (traffic_wds  >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
-               atomic_add(5, &dd->active_time); /* S/B #define */
        spin_unlock_irqrestore(&dd->eep_st_lock, flags);
 
        qib_chk_6120_errormask(dd);
@@ -2929,6 +2930,7 @@ bail:
 static int qib_6120_set_loopback(struct qib_pportdata *ppd, const char *what)
 {
        int ret = 0;
+
        if (!strncmp(what, "ibc", 3)) {
                ppd->dd->cspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback);
                qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
@@ -3170,6 +3172,7 @@ static void get_6120_chip_params(struct qib_devdata *dd)
 static void set_6120_baseaddrs(struct qib_devdata *dd)
 {
        u32 cregbase;
+
        cregbase = qib_read_kreg32(dd, kr_counterregbase);
        dd->cspec->cregbase = (u64 __iomem *)
                ((char __iomem *) dd->kregbase + cregbase);
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 7dec89fdc1248dc69c3cab38069e5e2f32986ce3..22affda8af88eacbd11f21abab55ba299dfb0e0b 100644
@@ -902,7 +902,8 @@ static void sdma_7220_errors(struct qib_pportdata *ppd, u64 errs)
        errs &= QLOGIC_IB_E_SDMAERRS;
 
        msg = dd->cspec->sdmamsgbuf;
-       qib_decode_7220_sdma_errs(ppd, errs, msg, sizeof dd->cspec->sdmamsgbuf);
+       qib_decode_7220_sdma_errs(ppd, errs, msg,
+               sizeof(dd->cspec->sdmamsgbuf));
        spin_lock_irqsave(&ppd->sdma_lock, flags);
 
        if (errs & ERR_MASK(SendBufMisuseErr)) {
@@ -1043,6 +1044,7 @@ done:
 static void reenable_7220_chase(unsigned long opaque)
 {
        struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+
        ppd->cpspec->chase_timer.expires = 0;
        qib_set_ib_7220_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
                QLOGIC_IB_IBCC_LINKINITCMD_POLL);
@@ -1101,7 +1103,7 @@ static void handle_7220_errors(struct qib_devdata *dd, u64 errs)
 
        /* do these first, they are most important */
        if (errs & ERR_MASK(HardwareErr))
-               qib_7220_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
+               qib_7220_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
        else
                for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
                        if (errs & dd->eep_st_masks[log_idx].errs_to_log)
@@ -1155,7 +1157,7 @@ static void handle_7220_errors(struct qib_devdata *dd, u64 errs)
                ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) |
                ERR_MASK(HardwareErr) | ERR_MASK(SDmaDisabledErr);
 
-       qib_decode_7220_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask);
+       qib_decode_7220_err(dd, msg, sizeof(dd->cspec->emsgbuf), errs & ~mask);
 
        if (errs & E_SUM_PKTERRS)
                qib_stats.sps_rcverrs++;
@@ -1380,7 +1382,7 @@ static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg,
                bits = (u32) ((hwerrs >>
                               QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &
                              QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);
-               snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
+               snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
                         "[PCIe Mem Parity Errs %x] ", bits);
                strlcat(msg, bitsmsg, msgl);
        }
@@ -1390,7 +1392,7 @@ static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg,
 
        if (hwerrs & _QIB_PLL_FAIL) {
                isfatal = 1;
-               snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
+               snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
                         "[PLL failed (%llx), InfiniPath hardware unusable]",
                         (unsigned long long) hwerrs & _QIB_PLL_FAIL);
                strlcat(msg, bitsmsg, msgl);
@@ -3297,8 +3299,6 @@ static void qib_get_7220_faststats(unsigned long opaque)
        spin_lock_irqsave(&dd->eep_st_lock, flags);
        traffic_wds -= dd->traffic_wds;
        dd->traffic_wds += traffic_wds;
-       if (traffic_wds  >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
-               atomic_add(5, &dd->active_time); /* S/B #define */
        spin_unlock_irqrestore(&dd->eep_st_lock, flags);
 done:
        mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index a7eb32517a04bced55f9d1d5720b5d060d96e92b..ef97b71c8f7dd713a77401f593c6a10320a046e6 100644
@@ -117,7 +117,7 @@ MODULE_PARM_DESC(chase, "Enable state chase handling");
 
 static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
 module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
-MODULE_PARM_DESC(long_attenuation, \
+MODULE_PARM_DESC(long_attenuation,
                 "attenuation cutoff (dB) for long copper cable setup");
 
 static ushort qib_singleport;
@@ -153,11 +153,12 @@ static struct kparam_string kp_txselect = {
 static int  setup_txselect(const char *, struct kernel_param *);
 module_param_call(txselect, setup_txselect, param_get_string,
                  &kp_txselect, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(txselect, \
+MODULE_PARM_DESC(txselect,
                 "Tx serdes indices (for no QSFP or invalid QSFP data)");
 
 #define BOARD_QME7342 5
 #define BOARD_QMH7342 6
+#define BOARD_QMH7360 9
 #define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
                    BOARD_QMH7342)
 #define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
@@ -817,6 +818,7 @@ static inline void qib_write_ureg(const struct qib_devdata *dd,
                                  enum qib_ureg regno, u64 value, int ctxt)
 {
        u64 __iomem *ubase;
+
        if (dd->userbase)
                ubase = (u64 __iomem *)
                        ((char __iomem *) dd->userbase +
@@ -1677,7 +1679,7 @@ static noinline void handle_7322_errors(struct qib_devdata *dd)
        /* do these first, they are most important */
        if (errs & QIB_E_HARDWARE) {
                *msg = '\0';
-               qib_7322_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
+               qib_7322_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
        } else
                for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
                        if (errs & dd->eep_st_masks[log_idx].errs_to_log)
@@ -1702,7 +1704,7 @@ static noinline void handle_7322_errors(struct qib_devdata *dd)
        mask = QIB_E_HARDWARE;
        *msg = '\0';
 
-       err_decode(msg, sizeof dd->cspec->emsgbuf, errs & ~mask,
+       err_decode(msg, sizeof(dd->cspec->emsgbuf), errs & ~mask,
                   qib_7322error_msgs);
 
        /*
@@ -1889,10 +1891,10 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
        *msg = '\0';
 
        if (errs & ~QIB_E_P_BITSEXTANT) {
-               err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
+               err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
                           errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
                if (!*msg)
-                       snprintf(msg, sizeof ppd->cpspec->epmsgbuf,
+                       snprintf(msg, sizeof(ppd->cpspec->epmsgbuf),
                                 "no others");
                qib_dev_porterr(dd, ppd->port,
                        "error interrupt with unknown errors 0x%016Lx set (and %s)\n",
@@ -1906,7 +1908,7 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
                /* determine cause, then write to clear */
                symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
                qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
-               err_decode(msg, sizeof ppd->cpspec->epmsgbuf, symptom,
+               err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), symptom,
                           hdrchk_msgs);
                *msg = '\0';
                /* senderrbuf cleared in SPKTERRS below */
@@ -1922,7 +1924,7 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
                         * isn't valid.  We don't want to confuse people, so
                         * we just don't print them, except at debug
                         */
-                       err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
+                       err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
                                   (errs & QIB_E_P_LINK_PKTERRS),
                                   qib_7322p_error_msgs);
                        *msg = '\0';
@@ -1938,7 +1940,7 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
                 * valid.  We don't want to confuse people, so we just
                 * don't print them, except at debug
                 */
-               err_decode(msg, sizeof ppd->cpspec->epmsgbuf, errs,
+               err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), errs,
                           qib_7322p_error_msgs);
                ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
                *msg = '\0';
@@ -2031,6 +2033,7 @@ static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
                if (dd->cspec->num_msix_entries) {
                        /* and same for MSIx */
                        u64 val = qib_read_kreg64(dd, kr_intgranted);
+
                        if (val)
                                qib_write_kreg(dd, kr_intgranted, val);
                }
@@ -2176,6 +2179,7 @@ static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
                int err;
                unsigned long flags;
                struct qib_pportdata *ppd = dd->pport;
+
                for (; pidx < dd->num_pports; ++pidx, ppd++) {
                        err = 0;
                        if (pidx == 0 && (hwerrs &
@@ -2801,9 +2805,11 @@ static void qib_irq_notifier_notify(struct irq_affinity_notify *notify,
 
        if (n->rcv) {
                struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
+
                qib_update_rhdrq_dca(rcd, cpu);
        } else {
                struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
+
                qib_update_sdma_dca(ppd, cpu);
        }
 }
@@ -2816,9 +2822,11 @@ static void qib_irq_notifier_release(struct kref *ref)
 
        if (n->rcv) {
                struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
+
                dd = rcd->dd;
        } else {
                struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
+
                dd = ppd->dd;
        }
        qib_devinfo(dd->pcidev,
@@ -2994,6 +3002,7 @@ static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
                struct qib_pportdata *ppd;
                struct qib_qsfp_data *qd;
                u32 mask;
+
                if (!dd->pport[pidx].link_speed_supported)
                        continue;
                mask = QSFP_GPIO_MOD_PRS_N;
@@ -3001,6 +3010,7 @@ static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
                mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
                if (gpiostatus & dd->cspec->gpio_mask & mask) {
                        u64 pins;
+
                        qd = &ppd->cpspec->qsfp_data;
                        gpiostatus &= ~mask;
                        pins = qib_read_kreg64(dd, kr_extstatus);
@@ -3442,7 +3452,7 @@ try_intx:
        }
 
        /* Try to get MSIx interrupts */
-       memset(redirect, 0, sizeof redirect);
+       memset(redirect, 0, sizeof(redirect));
        mask = ~0ULL;
        msixnum = 0;
        local_mask = cpumask_of_pcibus(dd->pcidev->bus);
@@ -3617,6 +3627,10 @@ static unsigned qib_7322_boardname(struct qib_devdata *dd)
                n = "InfiniPath_QME7362";
                dd->flags |= QIB_HAS_QSFP;
                break;
+       case BOARD_QMH7360:
+               n = "Intel IB QDR 1P FLR-QSFP Adptr";
+               dd->flags |= QIB_HAS_QSFP;
+               break;
        case 15:
                n = "InfiniPath_QLE7342_TEST";
                dd->flags |= QIB_HAS_QSFP;
@@ -3694,6 +3708,7 @@ static int qib_do_7322_reset(struct qib_devdata *dd)
         */
        for (i = 0; i < msix_entries; i++) {
                u64 vecaddr, vecdata;
+
                vecaddr = qib_read_kreg64(dd, 2 * i +
                                  (QIB_7322_MsixTable_OFFS / sizeof(u64)));
                vecdata = qib_read_kreg64(dd, 1 + 2 * i +
@@ -5178,8 +5193,6 @@ static void qib_get_7322_faststats(unsigned long opaque)
                spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
                traffic_wds -= ppd->dd->traffic_wds;
                ppd->dd->traffic_wds += traffic_wds;
-               if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
-                       atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time);
                spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
                if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
                                                QIB_IB_QDR) &&
@@ -5357,6 +5370,7 @@ static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
 static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
 {
        u64 newctrlb;
+
        newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
                                    IBA7322_IBC_IBTA_1_2_MASK |
                                    IBA7322_IBC_MAX_SPEED_MASK);
@@ -5843,6 +5857,7 @@ static void get_7322_chip_params(struct qib_devdata *dd)
 static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
 {
        u32 cregbase;
+
        cregbase = qib_read_kreg32(dd, kr_counterregbase);
 
        dd->cspec->cregbase = (u64 __iomem *)(cregbase +
@@ -6183,6 +6198,7 @@ static int setup_txselect(const char *str, struct kernel_param *kp)
        struct qib_devdata *dd;
        unsigned long val;
        char *n;
+
        if (strlen(str) >= MAX_ATTEN_LEN) {
                pr_info("txselect_values string too long\n");
                return -ENOSPC;
@@ -6393,6 +6409,7 @@ static void write_7322_initregs(struct qib_devdata *dd)
        val = TIDFLOW_ERRBITS; /* these are W1C */
        for (i = 0; i < dd->cfgctxts; i++) {
                int flow;
+
                for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
                        qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
        }
@@ -6503,6 +6520,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
 
        for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
                struct qib_chippport_specific *cp = ppd->cpspec;
+
                ppd->link_speed_supported = features & PORT_SPD_CAP;
                features >>=  PORT_SPD_CAP_SHIFT;
                if (!ppd->link_speed_supported) {
@@ -6581,8 +6599,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
                                ppd->vls_supported = IB_VL_VL0_7;
                        else {
                                qib_devinfo(dd->pcidev,
-                                           "Invalid num_vls %u for MTU %d "
-                                           ", using 4 VLs\n",
+                                           "Invalid num_vls %u for MTU %d, using 4 VLs\n",
                                            qib_num_cfg_vls, mtu);
                                ppd->vls_supported = IB_VL_VL0_3;
                                qib_num_cfg_vls = 4;
@@ -7890,6 +7907,7 @@ static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
 static int serdes_7322_init(struct qib_pportdata *ppd)
 {
        int ret = 0;
+
        if (ppd->dd->cspec->r1)
                ret = serdes_7322_init_old(ppd);
        else
@@ -8305,8 +8323,8 @@ static void force_h1(struct qib_pportdata *ppd)
 
 static int qib_r_grab(struct qib_devdata *dd)
 {
-       u64 val;
-       val = SJA_EN;
+       u64 val = SJA_EN;
+
        qib_write_kreg(dd, kr_r_access, val);
        qib_read_kreg32(dd, kr_scratch);
        return 0;
@@ -8319,6 +8337,7 @@ static int qib_r_wait_for_rdy(struct qib_devdata *dd)
 {
        u64 val;
        int timeout;
+
        for (timeout = 0; timeout < 100 ; ++timeout) {
                val = qib_read_kreg32(dd, kr_r_access);
                if (val & R_RDY)
@@ -8346,6 +8365,7 @@ static int qib_r_shift(struct qib_devdata *dd, int bisten,
                }
                if (inp) {
                        int tdi = inp[pos >> 3] >> (pos & 7);
+
                        val |= ((tdi & 1) << R_TDI_LSB);
                }
                qib_write_kreg(dd, kr_r_access, val);
index 729da39c49ed3fac472db472c3ac58b7766f8ada..2ee36953e234c46ff704bc6e5dfbed8339c09974 100644 (file)
@@ -140,7 +140,7 @@ int qib_create_ctxts(struct qib_devdata *dd)
         * Allocate full ctxtcnt array, rather than just cfgctxts, because
         * cleanup iterates across all possible ctxts.
         */
-       dd->rcd = kzalloc(sizeof(*dd->rcd) * dd->ctxtcnt, GFP_KERNEL);
+       dd->rcd = kcalloc(dd->ctxtcnt, sizeof(*dd->rcd), GFP_KERNEL);
        if (!dd->rcd) {
                qib_dev_err(dd,
                        "Unable to allocate ctxtdata array, failing\n");
@@ -234,6 +234,7 @@ int qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
                        u8 hw_pidx, u8 port)
 {
        int size;
+
        ppd->dd = dd;
        ppd->hw_pidx = hw_pidx;
        ppd->port = port; /* IB port number, not index */
@@ -613,6 +614,7 @@ static int qib_create_workqueues(struct qib_devdata *dd)
                ppd = dd->pport + pidx;
                if (!ppd->qib_wq) {
                        char wq_name[8]; /* 3 + 2 + 1 + 1 + 1 */
+
                        snprintf(wq_name, sizeof(wq_name), "qib%d_%d",
                                dd->unit, pidx);
                        ppd->qib_wq =
@@ -714,6 +716,7 @@ int qib_init(struct qib_devdata *dd, int reinit)
 
        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                int mtu;
+
                if (lastfail)
                        ret = lastfail;
                ppd = dd->pport + pidx;
@@ -931,7 +934,6 @@ static void qib_shutdown_device(struct qib_devdata *dd)
                qib_free_pportdata(ppd);
        }
 
-       qib_update_eeprom_log(dd);
 }
 
 /**
@@ -1026,8 +1028,7 @@ static void qib_verify_pioperf(struct qib_devdata *dd)
        addr = vmalloc(cnt);
        if (!addr) {
                qib_devinfo(dd->pcidev,
-                        "Couldn't get memory for checking PIO perf,"
-                        " skipping\n");
+                        "Couldn't get memory for checking PIO perf, skipping\n");
                goto done;
        }
 
@@ -1163,6 +1164,7 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
 
        if (!qib_cpulist_count) {
                u32 count = num_online_cpus();
+
                qib_cpulist = kzalloc(BITS_TO_LONGS(count) *
                                      sizeof(long), GFP_KERNEL);
                if (qib_cpulist)
@@ -1179,7 +1181,7 @@ bail:
        if (!list_empty(&dd->list))
                list_del_init(&dd->list);
        ib_dealloc_device(&dd->verbs_dev.ibdev);
-       return ERR_PTR(ret);;
+       return ERR_PTR(ret);
 }
 
 /*
index f4918f2165ec72616d51a3bd9c461b33f6ed8548..086616d071b988e38cc813b09a9659d787195c62 100644 (file)
@@ -168,7 +168,6 @@ skip_ibchange:
        ppd->lastibcstat = ibcs;
        if (ev)
                signal_ib_event(ppd, ev);
-       return;
 }
 
 void qib_clear_symerror_on_linkup(unsigned long opaque)
index 3b9afccaaade824370f5c0ea0d6d6ceb519e6090..ad843c786e7212d0c89f90264bacb5cb6b8346a0 100644 (file)
@@ -122,10 +122,10 @@ void qib_free_lkey(struct qib_mregion *mr)
        if (!mr->lkey_published)
                goto out;
        if (lkey == 0)
-               rcu_assign_pointer(dev->dma_mr, NULL);
+               RCU_INIT_POINTER(dev->dma_mr, NULL);
        else {
                r = lkey >> (32 - ib_qib_lkey_table_size);
-               rcu_assign_pointer(rkt->table[r], NULL);
+               RCU_INIT_POINTER(rkt->table[r], NULL);
        }
        qib_put_mr(mr);
        mr->lkey_published = 0;
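rcu_assign_pointer() carries the release barrier needed when publishing a newly initialized object to concurrent readers; when the stored value is NULL there is nothing a reader could dereference, so the barrier buys nothing and RCU_INIT_POINTER() (a plain store) is sufficient. A hedged two-line sketch with hypothetical variables:

        rcu_assign_pointer(mr_slot, new_mr);  /* publish a new object: barrier required */
        RCU_INIT_POINTER(mr_slot, NULL);      /* retract it: a plain store is enough */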
index 636be117b57859e690fdb0704be6fe10b429511f..395f4046dba2054633ad41f4a0f67a4dbee57c63 100644 (file)
@@ -152,14 +152,14 @@ void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
        data.trap_num = trap_num;
        data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
        data.toggle_count = 0;
-       memset(&data.details, 0, sizeof data.details);
+       memset(&data.details, 0, sizeof(data.details));
        data.details.ntc_257_258.lid1 = lid1;
        data.details.ntc_257_258.lid2 = lid2;
        data.details.ntc_257_258.key = cpu_to_be32(key);
        data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1);
        data.details.ntc_257_258.qp2 = cpu_to_be32(qp2);
 
-       qib_send_trap(ibp, &data, sizeof data);
+       qib_send_trap(ibp, &data, sizeof(data));
 }
 
 /*
@@ -176,7 +176,7 @@ static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp)
        data.trap_num = IB_NOTICE_TRAP_BAD_MKEY;
        data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
        data.toggle_count = 0;
-       memset(&data.details, 0, sizeof data.details);
+       memset(&data.details, 0, sizeof(data.details));
        data.details.ntc_256.lid = data.issuer_lid;
        data.details.ntc_256.method = smp->method;
        data.details.ntc_256.attr_id = smp->attr_id;
@@ -198,7 +198,7 @@ static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp)
                       hop_cnt);
        }
 
-       qib_send_trap(ibp, &data, sizeof data);
+       qib_send_trap(ibp, &data, sizeof(data));
 }
 
 /*
@@ -214,11 +214,11 @@ void qib_cap_mask_chg(struct qib_ibport *ibp)
        data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
        data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
        data.toggle_count = 0;
-       memset(&data.details, 0, sizeof data.details);
+       memset(&data.details, 0, sizeof(data.details));
        data.details.ntc_144.lid = data.issuer_lid;
        data.details.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags);
 
-       qib_send_trap(ibp, &data, sizeof data);
+       qib_send_trap(ibp, &data, sizeof(data));
 }
 
 /*
@@ -234,11 +234,11 @@ void qib_sys_guid_chg(struct qib_ibport *ibp)
        data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG;
        data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
        data.toggle_count = 0;
-       memset(&data.details, 0, sizeof data.details);
+       memset(&data.details, 0, sizeof(data.details));
        data.details.ntc_145.lid = data.issuer_lid;
        data.details.ntc_145.new_sys_guid = ib_qib_sys_image_guid;
 
-       qib_send_trap(ibp, &data, sizeof data);
+       qib_send_trap(ibp, &data, sizeof(data));
 }
 
 /*
@@ -254,12 +254,12 @@ void qib_node_desc_chg(struct qib_ibport *ibp)
        data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
        data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
        data.toggle_count = 0;
-       memset(&data.details, 0, sizeof data.details);
+       memset(&data.details, 0, sizeof(data.details));
        data.details.ntc_144.lid = data.issuer_lid;
        data.details.ntc_144.local_changes = 1;
        data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG;
 
-       qib_send_trap(ibp, &data, sizeof data);
+       qib_send_trap(ibp, &data, sizeof(data));
 }
 
 static int subn_get_nodedescription(struct ib_smp *smp,
index 8b73a11d571c1671ba6678565c5e34754a3ab411..146cf29a2e1db19a8293f2ecbf3f8348ce1732c2 100644 (file)
@@ -134,7 +134,7 @@ struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev,
                                           void *obj) {
        struct qib_mmap_info *ip;
 
-       ip = kmalloc(sizeof *ip, GFP_KERNEL);
+       ip = kmalloc(sizeof(*ip), GFP_KERNEL);
        if (!ip)
                goto bail;
 
index a77fb4fb14e43c255e23a41b7c86877835a78ea3..c4473db46699b5f367a0fd1b9d9df92bb2723d50 100644 (file)
@@ -55,7 +55,7 @@ static int init_qib_mregion(struct qib_mregion *mr, struct ib_pd *pd,
 
        m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
        for (; i < m; i++) {
-               mr->map[i] = kzalloc(sizeof *mr->map[0], GFP_KERNEL);
+               mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL);
                if (!mr->map[i])
                        goto bail;
        }
@@ -104,7 +104,7 @@ struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc)
                goto bail;
        }
 
-       mr = kzalloc(sizeof *mr, GFP_KERNEL);
+       mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr) {
                ret = ERR_PTR(-ENOMEM);
                goto bail;
@@ -143,7 +143,7 @@ static struct qib_mr *alloc_mr(int count, struct ib_pd *pd)
 
        /* Allocate struct plus pointers to first level page tables. */
        m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
-       mr = kzalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL);
+       mr = kzalloc(sizeof(*mr) + m * sizeof(mr->mr.map[0]), GFP_KERNEL);
        if (!mr)
                goto bail;
 
@@ -347,7 +347,7 @@ qib_alloc_fast_reg_page_list(struct ib_device *ibdev, int page_list_len)
        if (size > PAGE_SIZE)
                return ERR_PTR(-EINVAL);
 
-       pl = kzalloc(sizeof *pl, GFP_KERNEL);
+       pl = kzalloc(sizeof(*pl), GFP_KERNEL);
        if (!pl)
                return ERR_PTR(-ENOMEM);
 
@@ -386,7 +386,7 @@ struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
 
        /* Allocate struct plus pointers to first level page tables. */
        m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ;
-       fmr = kzalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
+       fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]), GFP_KERNEL);
        if (!fmr)
                goto bail;
 
index 61a0046efb76ff9310b9a15f79dfe9dd11354c05..4758a3801ae8f916b6a96988b6c88ce5c344c588 100644 (file)
@@ -210,7 +210,7 @@ static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt,
        /* We can't pass qib_msix_entry array to qib_msix_setup
         * so use a dummy msix_entry array and copy the allocated
         * irq back to the qib_msix_entry array. */
-       msix_entry = kmalloc(nvec * sizeof(*msix_entry), GFP_KERNEL);
+       msix_entry = kcalloc(nvec, sizeof(*msix_entry), GFP_KERNEL);
        if (!msix_entry)
                goto do_intx;
 
@@ -234,8 +234,10 @@ free_msix_entry:
        kfree(msix_entry);
 
 do_intx:
-       qib_dev_err(dd, "pci_enable_msix_range %d vectors failed: %d, "
-                       "falling back to INTx\n", nvec, ret);
+       qib_dev_err(
+               dd,
+               "pci_enable_msix_range %d vectors failed: %d, falling back to INTx\n",
+               nvec, ret);
        *msixcnt = 0;
        qib_enable_intx(dd->pcidev);
 }
@@ -459,6 +461,7 @@ void qib_pcie_getcmd(struct qib_devdata *dd, u16 *cmd, u8 *iline, u8 *cline)
 void qib_pcie_reenable(struct qib_devdata *dd, u16 cmd, u8 iline, u8 cline)
 {
        int r;
+
        r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
                                   dd->pcibar0);
        if (r)
@@ -696,6 +699,7 @@ static void
 qib_pci_resume(struct pci_dev *pdev)
 {
        struct qib_devdata *dd = pci_get_drvdata(pdev);
+
        qib_devinfo(pdev, "QIB resume function called\n");
        pci_cleanup_aer_uncorrect_error_status(pdev);
        /*
index 6ddc0264aad2779ef327161c14a2ee9aef2e543f..4fa88ba2963e6ba21186ae5eb095ea531b741e97 100644 (file)
@@ -255,10 +255,10 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
 
        if (rcu_dereference_protected(ibp->qp0,
                        lockdep_is_held(&dev->qpt_lock)) == qp) {
-               rcu_assign_pointer(ibp->qp0, NULL);
+               RCU_INIT_POINTER(ibp->qp0, NULL);
        } else if (rcu_dereference_protected(ibp->qp1,
                        lockdep_is_held(&dev->qpt_lock)) == qp) {
-               rcu_assign_pointer(ibp->qp1, NULL);
+               RCU_INIT_POINTER(ibp->qp1, NULL);
        } else {
                struct qib_qp *q;
                struct qib_qp __rcu **qpp;
@@ -269,7 +269,7 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
                                lockdep_is_held(&dev->qpt_lock))) != NULL;
                                qpp = &q->next)
                        if (q == qp) {
-                               rcu_assign_pointer(*qpp,
+                               RCU_INIT_POINTER(*qpp,
                                        rcu_dereference_protected(qp->next,
                                         lockdep_is_held(&dev->qpt_lock)));
                                removed = 1;
@@ -315,7 +315,7 @@ unsigned qib_free_all_qps(struct qib_devdata *dd)
        for (n = 0; n < dev->qp_table_size; n++) {
                qp = rcu_dereference_protected(dev->qp_table[n],
                        lockdep_is_held(&dev->qpt_lock));
-               rcu_assign_pointer(dev->qp_table[n], NULL);
+               RCU_INIT_POINTER(dev->qp_table[n], NULL);
 
                for (; qp; qp = rcu_dereference_protected(qp->next,
                                        lockdep_is_held(&dev->qpt_lock)))
index fa71b1e666c5414fbba2357fe531e75cabc7986c..5e27f76805e28af0c0e0acdc9864360f8cf09b41 100644 (file)
@@ -81,7 +81,7 @@ static int qsfp_read(struct qib_pportdata *ppd, int addr, void *bp, int len)
         * Module could take up to 2 Msec to respond to MOD_SEL, and there
         * is no way to tell if it is ready, so we must wait.
         */
-       msleep(2);
+       msleep(20);
 
        /* Make sure TWSI bus is in sane state. */
        ret = qib_twsi_reset(dd);
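Raising these waits from msleep(2) to msleep(20) (here and in the QSFP write/deselect paths below) matches msleep() granularity: for arguments below roughly 20 ms the sleep is jiffy-based and commonly lasts far longer than requested, so a value of 2 buys no extra precision. If a tight ~2 ms delay were actually required, usleep_range() from <linux/delay.h> would be the usual tool; a hedged sketch, not what this patch does:

        /* sleep at least 2 ms, allow the timer to coalesce up to 3 ms */
        usleep_range(2000, 3000);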
@@ -99,6 +99,7 @@ static int qsfp_read(struct qib_pportdata *ppd, int addr, void *bp, int len)
        while (cnt < len) {
                unsigned in_page;
                int wlen = len - cnt;
+
                in_page = addr % QSFP_PAGESIZE;
                if ((in_page + wlen) > QSFP_PAGESIZE)
                        wlen = QSFP_PAGESIZE - in_page;
@@ -139,7 +140,7 @@ deselect:
        else if (pass)
                qib_dev_porterr(dd, ppd->port, "QSFP retries: %d\n", pass);
 
-       msleep(2);
+       msleep(20);
 
 bail:
        mutex_unlock(&dd->eep_lock);
@@ -189,7 +190,7 @@ static int qib_qsfp_write(struct qib_pportdata *ppd, int addr, void *bp,
         * Module could take up to 2 Msec to respond to MOD_SEL,
         * and there is no way to tell if it is ready, so we must wait.
         */
-       msleep(2);
+       msleep(20);
 
        /* Make sure TWSI bus is in sane state. */
        ret = qib_twsi_reset(dd);
@@ -206,6 +207,7 @@ static int qib_qsfp_write(struct qib_pportdata *ppd, int addr, void *bp,
        while (cnt < len) {
                unsigned in_page;
                int wlen = len - cnt;
+
                in_page = addr % QSFP_PAGESIZE;
                if ((in_page + wlen) > QSFP_PAGESIZE)
                        wlen = QSFP_PAGESIZE - in_page;
@@ -234,7 +236,7 @@ deselect:
         * going away, and there is no way to tell if it is ready.
         * so we must wait.
         */
-       msleep(2);
+       msleep(20);
 
 bail:
        mutex_unlock(&dd->eep_lock);
@@ -296,6 +298,7 @@ int qib_refresh_qsfp_cache(struct qib_pportdata *ppd, struct qib_qsfp_cache *cp)
                 * set the page to zero, Even if it already appears to be zero.
                 */
                u8 poke = 0;
+
                ret = qib_qsfp_write(ppd, 127, &poke, 1);
                udelay(50);
                if (ret != 1) {
@@ -480,7 +483,6 @@ void qib_qsfp_init(struct qib_qsfp_data *qd,
        udelay(20); /* Generous RST dwell */
 
        dd->f_gpio_mod(dd, mask, mask, mask);
-       return;
 }
 
 void qib_qsfp_deinit(struct qib_qsfp_data *qd)
@@ -540,6 +542,7 @@ int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len)
 
        while (bidx < QSFP_DEFAULT_HDR_CNT) {
                int iidx;
+
                ret = qsfp_read(ppd, bidx, bin_buff, QSFP_DUMP_CHUNK);
                if (ret < 0)
                        goto bail;
index 2f2501890c4ea2b26a9ebd7a82755688605443e3..4544d6f88ad77c7f7c69fd6e6d4a138188f493b3 100644 (file)
@@ -1017,7 +1017,7 @@ void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr)
                /* Post a send completion queue entry if requested. */
                if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
                    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
-                       memset(&wc, 0, sizeof wc);
+                       memset(&wc, 0, sizeof(wc));
                        wc.wr_id = wqe->wr.wr_id;
                        wc.status = IB_WC_SUCCESS;
                        wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
@@ -1073,7 +1073,7 @@ static struct qib_swqe *do_rc_completion(struct qib_qp *qp,
                /* Post a send completion queue entry if requested. */
                if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
                    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
-                       memset(&wc, 0, sizeof wc);
+                       memset(&wc, 0, sizeof(wc));
                        wc.wr_id = wqe->wr.wr_id;
                        wc.status = IB_WC_SUCCESS;
                        wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
index 4c07a8b34ffe27e2bfa89a19883fa25ef6aa64e7..f42bd0f47577a4557f47cac58de4a16b678b923d 100644 (file)
@@ -247,8 +247,8 @@ static __be64 get_sguid(struct qib_ibport *ibp, unsigned index)
                struct qib_pportdata *ppd = ppd_from_ibp(ibp);
 
                return ppd->guid;
-       } else
-               return ibp->guids[index - 1];
+       }
+       return ibp->guids[index - 1];
 }
 
 static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
@@ -420,7 +420,7 @@ again:
                goto serr;
        }
 
-       memset(&wc, 0, sizeof wc);
+       memset(&wc, 0, sizeof(wc));
        send_status = IB_WC_SUCCESS;
 
        release = 1;
@@ -792,7 +792,7 @@ void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
            status != IB_WC_SUCCESS) {
                struct ib_wc wc;
 
-               memset(&wc, 0, sizeof wc);
+               memset(&wc, 0, sizeof(wc));
                wc.wr_id = wqe->wr.wr_id;
                wc.status = status;
                wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
index 911205d3d5a0bf255fae65126740ab47eaa4a8ef..c72775f2721226868604b3770b1dbe23b7456dc0 100644 (file)
@@ -259,6 +259,7 @@ static int qib_ibsd_reset(struct qib_devdata *dd, int assert_rst)
                 * it again during startup.
                 */
                u64 val;
+
                rst_val &= ~(1ULL);
                qib_write_kreg(dd, kr_hwerrmask,
                               dd->cspec->hwerrmask &
@@ -590,6 +591,7 @@ static int epb_access(struct qib_devdata *dd, int sdnum, int claim)
                 * Both should be clear
                 */
                u64 newval = 0;
+
                qib_write_kreg(dd, acc, newval);
                /* First read after write is not trustworthy */
                pollval = qib_read_kreg32(dd, acc);
@@ -601,6 +603,7 @@ static int epb_access(struct qib_devdata *dd, int sdnum, int claim)
                /* Need to claim */
                u64 pollval;
                u64 newval = EPB_ACC_REQ | oct_sel;
+
                qib_write_kreg(dd, acc, newval);
                /* First read after write is not trustworthy */
                pollval = qib_read_kreg32(dd, acc);
@@ -812,6 +815,7 @@ static int qib_sd7220_ram_xfer(struct qib_devdata *dd, int sdnum, u32 loc,
                        if (!sofar) {
                                /* Only set address at start of chunk */
                                int addrbyte = (addr + sofar) >> 8;
+
                                transval = csbit | EPB_MADDRH | addrbyte;
                                tries = epb_trans(dd, trans, transval,
                                                  &transval);
@@ -922,7 +926,7 @@ qib_sd7220_ib_vfy(struct qib_devdata *dd, const struct firmware *fw)
  * IRQ not set up at this point in init, so we poll.
  */
 #define IB_SERDES_TRIM_DONE (1ULL << 11)
-#define TRIM_TMO (30)
+#define TRIM_TMO (15)
 
 static int qib_sd_trimdone_poll(struct qib_devdata *dd)
 {
@@ -940,7 +944,7 @@ static int qib_sd_trimdone_poll(struct qib_devdata *dd)
                        ret = 1;
                        break;
                }
-               msleep(10);
+               msleep(20);
        }
        if (trim_tmo >= TRIM_TMO) {
                qib_dev_err(dd, "No TRIMDONE in %d tries\n", trim_tmo);
@@ -1071,6 +1075,7 @@ static int qib_sd_setvals(struct qib_devdata *dd)
                dds_reg_map >>= 4;
                for (midx = 0; midx < DDS_ROWS; ++midx) {
                        u64 __iomem *daddr = taddr + ((midx << 4) + idx);
+
                        data = dds_init_vals[midx].reg_vals[idx];
                        writeq(data, daddr);
                        mmiowb();
index 3c8e4e3caca6240175bbddb35fb304107179920e..81f56cdff2bc280c7a64f816a215b10b703d0ec4 100644 (file)
@@ -586,8 +586,8 @@ static ssize_t show_serial(struct device *device,
                container_of(device, struct qib_ibdev, ibdev.dev);
        struct qib_devdata *dd = dd_from_dev(dev);
 
-       buf[sizeof dd->serial] = '\0';
-       memcpy(buf, dd->serial, sizeof dd->serial);
+       buf[sizeof(dd->serial)] = '\0';
+       memcpy(buf, dd->serial, sizeof(dd->serial));
        strcat(buf, "\n");
        return strlen(buf);
 }
@@ -611,28 +611,6 @@ bail:
        return ret < 0 ? ret : count;
 }
 
-static ssize_t show_logged_errs(struct device *device,
-                               struct device_attribute *attr, char *buf)
-{
-       struct qib_ibdev *dev =
-               container_of(device, struct qib_ibdev, ibdev.dev);
-       struct qib_devdata *dd = dd_from_dev(dev);
-       int idx, count;
-
-       /* force consistency with actual EEPROM */
-       if (qib_update_eeprom_log(dd) != 0)
-               return -ENXIO;
-
-       count = 0;
-       for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
-               count += scnprintf(buf + count, PAGE_SIZE - count, "%d%c",
-                                  dd->eep_st_errs[idx],
-                                  idx == (QIB_EEP_LOG_CNT - 1) ? '\n' : ' ');
-       }
-
-       return count;
-}
-
 /*
  * Dump tempsense regs. in decimal, to ease shell-scripts.
  */
@@ -679,7 +657,6 @@ static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL);
 static DEVICE_ATTR(nfreectxts, S_IRUGO, show_nfreectxts, NULL);
 static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
 static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
-static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL);
 static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL);
 static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL);
 static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset);
@@ -693,7 +670,6 @@ static struct device_attribute *qib_attributes[] = {
        &dev_attr_nfreectxts,
        &dev_attr_serial,
        &dev_attr_boardversion,
-       &dev_attr_logged_errors,
        &dev_attr_tempsense,
        &dev_attr_localbus_info,
        &dev_attr_chip_reset,
index 647f7beb1b0a1669a841c8180a9d6f4f1722c4c9..f5698664419b430ac932fb396f7a35c11ca80816 100644 (file)
@@ -105,6 +105,7 @@ static void scl_out(struct qib_devdata *dd, u8 bit)
                udelay(2);
        else {
                int rise_usec;
+
                for (rise_usec = SCL_WAIT_USEC; rise_usec > 0; rise_usec -= 2) {
                        if (mask & dd->f_gpio_mod(dd, 0, 0, 0))
                                break;
@@ -326,6 +327,7 @@ int qib_twsi_reset(struct qib_devdata *dd)
 static int qib_twsi_wr(struct qib_devdata *dd, int data, int flags)
 {
        int ret = 1;
+
        if (flags & QIB_TWSI_START)
                start_seq(dd);
 
@@ -435,8 +437,7 @@ int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
        int sub_len;
        const u8 *bp = buffer;
        int max_wait_time, i;
-       int ret;
-       ret = 1;
+       int ret = 1;
 
        while (len > 0) {
                if (dev == QIB_TWSI_NO_DEV) {
index 31d3561400a49f6056eb6be5c082b05cc0edb0e4..eface3b3dacf6a3d49c6d67b6702de7d38abc8ab 100644 (file)
@@ -180,6 +180,7 @@ void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask,
 
        for (i = 0; i < cnt; i++) {
                int which;
+
                if (!test_bit(i, mask))
                        continue;
                /*
index aaf7039f8ed2112041174d1b320a3d8205348c08..26243b722b5e979c1324471b6e17871d3ef22540 100644 (file)
@@ -127,7 +127,7 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
         * present on the wire.
         */
        length = swqe->length;
-       memset(&wc, 0, sizeof wc);
+       memset(&wc, 0, sizeof(wc));
        wc.byte_len = length + sizeof(struct ib_grh);
 
        if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
index d2806cae234c254ef7e71ee58e03df5d2c1efdc9..3e0677c512768a7bd79a612fae251f0af70db512 100644 (file)
@@ -50,7 +50,7 @@
 /* expected size of headers (for dma_pool) */
 #define QIB_USER_SDMA_EXP_HEADER_LENGTH 64
 /* attempt to drain the queue for 5secs */
-#define QIB_USER_SDMA_DRAIN_TIMEOUT 500
+#define QIB_USER_SDMA_DRAIN_TIMEOUT 250
 
 /*
  * track how many times a process open this driver.
@@ -226,6 +226,7 @@ qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt)
                sdma_rb_node->refcount++;
        } else {
                int ret;
+
                sdma_rb_node = kmalloc(sizeof(
                        struct qib_user_sdma_rb_node), GFP_KERNEL);
                if (!sdma_rb_node)
@@ -936,6 +937,7 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
 
                        if (tiddma) {
                                char *tidsm = (char *)pkt + pktsize;
+
                                cfur = copy_from_user(tidsm,
                                        iov[idx].iov_base, tidsmsize);
                                if (cfur) {
@@ -1142,7 +1144,7 @@ void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
                qib_user_sdma_hwqueue_clean(ppd);
                qib_user_sdma_queue_clean(ppd, pq);
                mutex_unlock(&pq->lock);
-               msleep(10);
+               msleep(20);
        }
 
        if (pq->num_pending || pq->num_sending) {
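Assuming QIB_USER_SDMA_DRAIN_TIMEOUT bounds the number of passes through this wait loop (as its use alongside the msleep() suggests), the overall drain budget is unchanged: 500 * 10 ms = 250 * 20 ms = 5000 ms, which still matches the "attempt to drain the queue for 5secs" comment above the definition.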
@@ -1316,8 +1318,6 @@ retry:
 
        if (nfree && !list_empty(pktlist))
                goto retry;
-
-       return;
 }
 
 /* pq->lock must be held, get packets on the wire... */
index 9bcfbd8429804e237b23555a54bbd2b1d0fc5a27..4a3599890ea5f114655a34e472bee9197d73bd2c 100644 (file)
@@ -1342,6 +1342,7 @@ static int qib_verbs_send_pio(struct qib_qp *qp, struct qib_ib_header *ibhdr,
 done:
        if (dd->flags & QIB_USE_SPCL_TRIG) {
                u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
+
                qib_flush_wc();
                __raw_writel(0xaebecede, piobuf_orig + spcl_off);
        }
@@ -1744,7 +1745,7 @@ static struct ib_pd *qib_alloc_pd(struct ib_device *ibdev,
         * we allow allocations of more than we report for this value.
         */
 
-       pd = kmalloc(sizeof *pd, GFP_KERNEL);
+       pd = kmalloc(sizeof(*pd), GFP_KERNEL);
        if (!pd) {
                ret = ERR_PTR(-ENOMEM);
                goto bail;
@@ -1829,7 +1830,7 @@ static struct ib_ah *qib_create_ah(struct ib_pd *pd,
                goto bail;
        }
 
-       ah = kmalloc(sizeof *ah, GFP_ATOMIC);
+       ah = kmalloc(sizeof(*ah), GFP_ATOMIC);
        if (!ah) {
                ret = ERR_PTR(-ENOMEM);
                goto bail;
@@ -1862,7 +1863,7 @@ struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid)
        struct ib_ah *ah = ERR_PTR(-EINVAL);
        struct qib_qp *qp0;
 
-       memset(&attr, 0, sizeof attr);
+       memset(&attr, 0, sizeof(attr));
        attr.dlid = dlid;
        attr.port_num = ppd_from_ibp(ibp)->port;
        rcu_read_lock();
@@ -1977,7 +1978,7 @@ static struct ib_ucontext *qib_alloc_ucontext(struct ib_device *ibdev,
        struct qib_ucontext *context;
        struct ib_ucontext *ret;
 
-       context = kmalloc(sizeof *context, GFP_KERNEL);
+       context = kmalloc(sizeof(*context), GFP_KERNEL);
        if (!context) {
                ret = ERR_PTR(-ENOMEM);
                goto bail;
@@ -2054,7 +2055,9 @@ int qib_register_ib_device(struct qib_devdata *dd)
 
        dev->qp_table_size = ib_qib_qp_table_size;
        get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd));
-       dev->qp_table = kmalloc(dev->qp_table_size * sizeof *dev->qp_table,
+       dev->qp_table = kmalloc_array(
+                               dev->qp_table_size,
+                               sizeof(*dev->qp_table),
                                GFP_KERNEL);
        if (!dev->qp_table) {
                ret = -ENOMEM;
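kmalloc_array(n, size, flags) is the overflow-checked counterpart of kmalloc(n * size, flags): like kcalloc() it fails cleanly when n * size would overflow, but it does not zero the memory, which is acceptable when the caller initializes every slot itself. A minimal sketch with a hypothetical count n:

        #include <linux/slab.h>

        struct qib_qp __rcu **tbl;

        tbl = kmalloc_array(n, sizeof(*tbl), GFP_KERNEL); /* overflow-checked, not zeroed */
        if (!tbl)
                return -ENOMEM;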
@@ -2122,7 +2125,7 @@ int qib_register_ib_device(struct qib_devdata *dd)
        for (i = 0; i < ppd->sdma_descq_cnt; i++) {
                struct qib_verbs_txreq *tx;
 
-               tx = kzalloc(sizeof *tx, GFP_KERNEL);
+               tx = kzalloc(sizeof(*tx), GFP_KERNEL);
                if (!tx) {
                        ret = -ENOMEM;
                        goto err_tx;
index dabb697b1c2a6025ae72927740582a1cb277f796..f8ea069a3eafca4078ab70848c3804867609143c 100644 (file)
@@ -43,7 +43,7 @@ static struct qib_mcast_qp *qib_mcast_qp_alloc(struct qib_qp *qp)
 {
        struct qib_mcast_qp *mqp;
 
-       mqp = kmalloc(sizeof *mqp, GFP_KERNEL);
+       mqp = kmalloc(sizeof(*mqp), GFP_KERNEL);
        if (!mqp)
                goto bail;
 
@@ -75,7 +75,7 @@ static struct qib_mcast *qib_mcast_alloc(union ib_gid *mgid)
 {
        struct qib_mcast *mcast;
 
-       mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
+       mcast = kmalloc(sizeof(*mcast), GFP_KERNEL);
        if (!mcast)
                goto bail;
 
index 1d7281c5a02eaf633fed9d4b71d4bec2b9433915..81b225f2300aed34eab9b88077bd4c4097dceae2 100644 (file)
@@ -72,6 +72,7 @@ int qib_enable_wc(struct qib_devdata *dd)
        if (dd->piobcnt2k && dd->piobcnt4k) {
                /* 2 sizes for chip */
                unsigned long pio2kbase, pio4kbase;
+
                pio2kbase = dd->piobufbase & 0xffffffffUL;
                pio4kbase = (dd->piobufbase >> 32) & 0xffffffffUL;
                if (pio2kbase < pio4kbase) {
@@ -91,7 +92,7 @@ int qib_enable_wc(struct qib_devdata *dd)
        }
 
        for (bits = 0; !(piolen & (1ULL << bits)); bits++)
-               /* do nothing */ ;
+               ; /* do nothing */
 
        if (piolen != (1ULL << bits)) {
                piolen >>= bits;
@@ -100,8 +101,8 @@ int qib_enable_wc(struct qib_devdata *dd)
                piolen = 1ULL << (bits + 1);
        }
        if (pioaddr & (piolen - 1)) {
-               u64 atmp;
-               atmp = pioaddr & ~(piolen - 1);
+               u64 atmp = pioaddr & ~(piolen - 1);
+
                if (atmp < addr || (atmp + piolen) > (addr + len)) {
                        qib_dev_err(dd,
                                "No way to align address/size (%llx/%llx), no WC mtrr\n",
index 5ce26817e7e1d9b8d43126518188b3769e0c7f44..b47aea1094b2d9f7e434cf8a442f4b83b1a28f3c 100644 (file)
@@ -654,7 +654,9 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
                           enum dma_data_direction dma_dir);
 
 void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
-                             struct iser_data_buf *data);
+                             struct iser_data_buf *data,
+                             enum dma_data_direction dir);
+
 int  iser_initialize_task_headers(struct iscsi_task *task,
                        struct iser_tx_desc *tx_desc);
 int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
index 3821633f1065b15a9fcc1b1c36a74eda619aeae3..20e859a6f1a63b2fede51dcb2b02b9b6f638a36e 100644 (file)
@@ -320,9 +320,6 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn)
        struct ib_conn *ib_conn = &iser_conn->ib_conn;
        struct iser_device *device = ib_conn->device;
 
-       if (!iser_conn->rx_descs)
-               goto free_login_buf;
-
        if (device->iser_free_rdma_reg_res)
                device->iser_free_rdma_reg_res(ib_conn);
 
@@ -334,7 +331,6 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn)
        /* make sure we never redo any unmapping */
        iser_conn->rx_descs = NULL;
 
-free_login_buf:
        iser_free_login_buf(iser_conn);
 }
 
@@ -714,19 +710,23 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
                device->iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
                if (is_rdma_data_aligned)
                        iser_dma_unmap_task_data(iser_task,
-                                                &iser_task->data[ISER_DIR_IN]);
+                                                &iser_task->data[ISER_DIR_IN],
+                                                DMA_FROM_DEVICE);
                if (prot_count && is_rdma_prot_aligned)
                        iser_dma_unmap_task_data(iser_task,
-                                                &iser_task->prot[ISER_DIR_IN]);
+                                                &iser_task->prot[ISER_DIR_IN],
+                                                DMA_FROM_DEVICE);
        }
 
        if (iser_task->dir[ISER_DIR_OUT]) {
                device->iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
                if (is_rdma_data_aligned)
                        iser_dma_unmap_task_data(iser_task,
-                                                &iser_task->data[ISER_DIR_OUT]);
+                                                &iser_task->data[ISER_DIR_OUT],
+                                                DMA_TO_DEVICE);
                if (prot_count && is_rdma_prot_aligned)
                        iser_dma_unmap_task_data(iser_task,
-                                                &iser_task->prot[ISER_DIR_OUT]);
+                                                &iser_task->prot[ISER_DIR_OUT],
+                                                DMA_TO_DEVICE);
        }
 }
index abce9339333f0a8551a2e52600d2409edd39461c..341040bf09849d41e4e01c613e0d0e01683fceda 100644 (file)
@@ -332,12 +332,13 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
 }
 
 void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
-                             struct iser_data_buf *data)
+                             struct iser_data_buf *data,
+                             enum dma_data_direction dir)
 {
        struct ib_device *dev;
 
        dev = iser_task->iser_conn->ib_conn.device->ib_device;
-       ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
+       ib_dma_unmap_sg(dev, data->buf, data->size, dir);
 }
 
 static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
@@ -357,7 +358,9 @@ static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
                iser_data_buf_dump(mem, ibdev);
 
        /* unmap the command data before accessing it */
-       iser_dma_unmap_task_data(iser_task, mem);
+       iser_dma_unmap_task_data(iser_task, mem,
+                                (cmd_dir == ISER_DIR_OUT) ?
+                                DMA_TO_DEVICE : DMA_FROM_DEVICE);
 
        /* allocate copy buf, if we are writing, copy the */
        /* unaligned scatterlist, dma map the copy        */
index 695a2704bd4380acafbc04c4ae7f0ecd96bc95a0..4065abe28829f78356c5ee7cd76ffa7b7330b3d5 100644 (file)
@@ -600,16 +600,16 @@ void iser_release_work(struct work_struct *work)
 /**
  * iser_free_ib_conn_res - release IB related resources
  * @iser_conn: iser connection struct
- * @destroy_device: indicator if we need to try to release
- *     the iser device (only iscsi shutdown and DEVICE_REMOVAL
- *     will use this.
+ * @destroy: indicator if we need to try to release the
+ *     iser device and memory regions pool (only iscsi
+ *     shutdown and DEVICE_REMOVAL will use this).
  *
  * This routine is called with the iser state mutex held
  * so the cm_id removal is out of here. It is Safe to
  * be invoked multiple times.
  */
 static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
-                                 bool destroy_device)
+                                 bool destroy)
 {
        struct ib_conn *ib_conn = &iser_conn->ib_conn;
        struct iser_device *device = ib_conn->device;
@@ -617,17 +617,20 @@ static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
        iser_info("freeing conn %p cma_id %p qp %p\n",
                  iser_conn, ib_conn->cma_id, ib_conn->qp);
 
-       iser_free_rx_descriptors(iser_conn);
-
        if (ib_conn->qp != NULL) {
                ib_conn->comp->active_qps--;
                rdma_destroy_qp(ib_conn->cma_id);
                ib_conn->qp = NULL;
        }
 
-       if (destroy_device && device != NULL) {
-               iser_device_try_release(device);
-               ib_conn->device = NULL;
+       if (destroy) {
+               if (iser_conn->rx_descs)
+                       iser_free_rx_descriptors(iser_conn);
+
+               if (device != NULL) {
+                       iser_device_try_release(device);
+                       ib_conn->device = NULL;
+               }
        }
 }
 
@@ -643,9 +646,11 @@ void iser_conn_release(struct iser_conn *iser_conn)
        mutex_unlock(&ig.connlist_mutex);
 
        mutex_lock(&iser_conn->state_mutex);
+       /* In case we end up here without ep_disconnect being invoked. */
        if (iser_conn->state != ISER_CONN_DOWN) {
                iser_warn("iser conn %p state %d, expected state down.\n",
                          iser_conn, iser_conn->state);
+               iscsi_destroy_endpoint(iser_conn->ep);
                iser_conn->state = ISER_CONN_DOWN;
        }
        /*
@@ -840,7 +845,7 @@ static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
 }
 
 static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
-                                bool destroy_device)
+                                bool destroy)
 {
        struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
 
@@ -850,7 +855,7 @@ static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
         * and flush errors.
         */
        iser_disconnected_handler(cma_id);
-       iser_free_ib_conn_res(iser_conn, destroy_device);
+       iser_free_ib_conn_res(iser_conn, destroy);
        complete(&iser_conn->ib_completion);
 };
 
index 867cc5084afbfce8ab24cfd87d5f52cda93697de..b513e662d8e4999401f3c7366368bfb3c7900664 100644 (file)
@@ -90,6 +90,7 @@ enum {
 };
 
 enum {
+       IB_USER_VERBS_EX_CMD_QUERY_DEVICE = IB_USER_VERBS_CMD_QUERY_DEVICE,
        IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD,
        IB_USER_VERBS_EX_CMD_DESTROY_FLOW,
 };
@@ -201,6 +202,28 @@ struct ib_uverbs_query_device_resp {
        __u8  reserved[4];
 };
 
+struct ib_uverbs_ex_query_device {
+       __u32 comp_mask;
+       __u32 reserved;
+};
+
+struct ib_uverbs_odp_caps {
+       __u64 general_caps;
+       struct {
+               __u32 rc_odp_caps;
+               __u32 uc_odp_caps;
+               __u32 ud_odp_caps;
+       } per_transport_caps;
+       __u32 reserved;
+};
+
+struct ib_uverbs_ex_query_device_resp {
+       struct ib_uverbs_query_device_resp base;
+       __u32 comp_mask;
+       __u32 response_length;
+       struct ib_uverbs_odp_caps odp_caps;
+};
+
 struct ib_uverbs_query_port {
        __u64 response;
        __u8  port_num;
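The new extended query-device structures follow the usual uverbs extension layout: the response embeds the legacy struct ib_uverbs_query_device_resp as its base, and response_length reports how many bytes the kernel actually filled, letting userspace built against a newer header detect whether the odp_caps block is valid. A hedged consumer-side sketch (the command issuing IB_USER_VERBS_EX_CMD_QUERY_DEVICE is elided):

        #include <stddef.h>
        #include <rdma/ib_user_verbs.h>

        /* returns non-zero if the kernel reported the ODP capability block */
        static int resp_has_odp_caps(const struct ib_uverbs_ex_query_device_resp *resp)
        {
                size_t need = offsetof(struct ib_uverbs_ex_query_device_resp, odp_caps)
                              + sizeof(resp->odp_caps);

                return resp->response_length >= need;
        }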