i40iw: Add missing cleanup on device close
author    Mustafa Ismail <mustafa.ismail@intel.com>
          Wed, 30 Nov 2016 20:59:26 +0000 (14:59 -0600)
committer Doug Ledford <dledford@redhat.com>
          Mon, 5 Dec 2016 21:09:40 +0000 (16:09 -0500)
On i40iw device close, disconnect all connected QPs by moving
them to error state, and block further QPs, PDs, and CQs from
being created. Additionally, make sure all resources have been
freed before deallocating the ibdev as part of the device close.

Signed-off-by: Mustafa Ismail <mustafa.ismail@intel.com>
Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
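
For context on the mechanism: the patch adds a device-level use count that every
QP, CQ, and MR STAG holds, plus a "closing" flag and a wait queue, so that close
can refuse new allocations, flush connected QPs to error state, and then wait for
outstanding resources to drain before freeing the ibdev. The sketch below is an
illustrative, minimal version of that pattern only; the drain_dev type and
function names are invented here and are not part of the driver.

#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/wait.h>

/* Illustrative stand-in for the fields this patch adds to i40iw_device. */
struct drain_dev {
	bool closing;			/* set at close; new allocations must fail */
	atomic64_t use_count;		/* outstanding QPs, CQs, and MR STAGs */
	wait_queue_head_t close_wq;	/* woken when use_count drops to zero */
};

static void drain_dev_init(struct drain_dev *d)
{
	d->closing = false;
	atomic64_set(&d->use_count, 0);
	init_waitqueue_head(&d->close_wq);
}

/* Taken whenever a resource is created (cf. i40iw_add_devusecount()). */
static void drain_dev_get(struct drain_dev *d)
{
	atomic64_inc(&d->use_count);
}

/* Dropped when the resource is freed (cf. i40iw_rem_devusecount()). */
static void drain_dev_put(struct drain_dev *d)
{
	if (atomic64_dec_and_test(&d->use_count))
		wake_up(&d->close_wq);	/* last user gone: wake the closer */
}

/* Close path: stop new work, flush QPs, then wait (bounded) for the drain. */
static void drain_dev_close(struct drain_dev *d, unsigned long timeout_jiffies)
{
	d->closing = true;	/* alloc_pd/create_qp/create_cq now return -ENODEV */
	/* ...move connected QPs to IB_QPS_ERR, as i40iw_cm_disconnect_all() does... */
	wait_event_timeout(d->close_wq,
			   !atomic64_read(&d->use_count),
			   timeout_jiffies);
	/* only after the drain (or timeout) is it safe to free the ibdev */
}

The bounded wait mirrors the patch's wait_event_timeout() on close_wq with
I40IW_EVENT_TIMEOUT in i40iw_destroy_rdma_device(), so a leaked reference delays
close but cannot hang it indefinitely.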
drivers/infiniband/hw/i40iw/i40iw.h
drivers/infiniband/hw/i40iw/i40iw_cm.c
drivers/infiniband/hw/i40iw/i40iw_cm.h
drivers/infiniband/hw/i40iw/i40iw_d.h
drivers/infiniband/hw/i40iw/i40iw_main.c
drivers/infiniband/hw/i40iw/i40iw_utils.c
drivers/infiniband/hw/i40iw/i40iw_verbs.c

diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
index dac9a6bcc63148d05bc4c20e735d4995bc95d808..c795c61602610701bee6e7191fb0a28f8609a43d 100644
@@ -303,10 +303,13 @@ struct i40iw_device {
        u32 mr_stagmask;
        u32 mpa_version;
        bool dcb;
+       bool closing;
        u32 used_pds;
        u32 used_cqs;
        u32 used_mrs;
        u32 used_qps;
+       wait_queue_head_t close_wq;
+       atomic64_t use_count;
 };
 
 struct i40iw_ib_device {
@@ -521,6 +524,8 @@ int i40iw_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *)
 
 void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev);
 void i40iw_add_pdusecount(struct i40iw_pd *iwpd);
+void i40iw_rem_devusecount(struct i40iw_device *iwdev);
+void i40iw_add_devusecount(struct i40iw_device *iwdev);
 void i40iw_hw_modify_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp,
                        struct i40iw_modify_qp_info *info, bool wait);
 
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index b60e34653ec153627587e0006a855c647240b578..11ef0b09c8433689dc2b4714dedf3d90751f2bd1 100644
@@ -4128,3 +4128,34 @@ static void i40iw_cm_post_event(struct i40iw_cm_event *event)
 
        queue_work(event->cm_node->cm_core->event_wq, &event->event_work);
 }
+
+/**
+ * i40iw_cm_disconnect_all - disconnect all connected qp's
+ * @iwdev: device pointer
+ */
+void i40iw_cm_disconnect_all(struct i40iw_device *iwdev)
+{
+       struct i40iw_cm_core *cm_core = &iwdev->cm_core;
+       struct list_head *list_core_temp;
+       struct list_head *list_node;
+       struct i40iw_cm_node *cm_node;
+       unsigned long flags;
+       struct list_head connected_list;
+       struct ib_qp_attr attr;
+
+       INIT_LIST_HEAD(&connected_list);
+       spin_lock_irqsave(&cm_core->ht_lock, flags);
+       list_for_each_safe(list_node, list_core_temp, &cm_core->connected_nodes) {
+               cm_node = container_of(list_node, struct i40iw_cm_node, list);
+               atomic_inc(&cm_node->ref_count);
+               list_add(&cm_node->connected_entry, &connected_list);
+       }
+       spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+
+       list_for_each_safe(list_node, list_core_temp, &connected_list) {
+               cm_node = container_of(list_node, struct i40iw_cm_node, connected_entry);
+               attr.qp_state = IB_QPS_ERR;
+               i40iw_modify_qp(&cm_node->iwqp->ibqp, &attr, IB_QP_STATE, NULL);
+               i40iw_rem_ref_cm_node(cm_node);
+       }
+}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.h b/drivers/infiniband/hw/i40iw/i40iw_cm.h
index 24615c24cb04ad49aecdc908bd256b89cd7f9cac..0381b7f5e20dc415922577542a0c9141e951b469 100644
@@ -339,6 +339,7 @@ struct i40iw_cm_node {
        int accept_pend;
        struct list_head timer_entry;
        struct list_head reset_entry;
+       struct list_head connected_entry;
        atomic_t passive_state;
        bool qhash_set;
        u8 user_pri;
@@ -443,4 +444,5 @@ int i40iw_arp_table(struct i40iw_device *iwdev,
                    u8 *mac_addr,
                    u32 action);
 
+void i40iw_cm_disconnect_all(struct i40iw_device *iwdev);
 #endif /* I40IW_CM_H */
diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h
index e184c0e99cb16782ed6f8ea616849b8354ec14e6..1bd4badb26d378eed9e8105e7736f32b76e3fe96 100644
@@ -35,6 +35,8 @@
 #ifndef I40IW_D_H
 #define I40IW_D_H
 
+#define I40IW_FIRST_USER_QP_ID  2
+
 #define I40IW_DB_ADDR_OFFSET    (4 * 1024 * 1024 - 64 * 1024)
 #define I40IW_VF_DB_ADDR_OFFSET (64 * 1024)
 
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index 9d3b9ee20ba71303810673f63ebad26aec7592a1..d86bb6e98f0781e2711e6f8c92ecf18904b5d520 100644
@@ -1546,6 +1546,7 @@ static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
 
        init_waitqueue_head(&iwdev->vchnl_waitq);
        init_waitqueue_head(&dev->vf_reqs);
+       init_waitqueue_head(&iwdev->close_wq);
 
        status = i40iw_initialize_dev(iwdev, ldev);
 exit:
@@ -1748,6 +1749,9 @@ static void i40iw_close(struct i40e_info *ldev, struct i40e_client *client, bool
                return;
 
        iwdev = &hdl->device;
+       iwdev->closing = true;
+
+       i40iw_cm_disconnect_all(iwdev);
        destroy_workqueue(iwdev->virtchnl_wq);
        i40iw_deinit_device(iwdev, reset);
 }
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index 4e880e8689ebe9b293d43bd39989e2b3b0f582d3..58151280828d72c8462f516453a8b18639e4bfc1 100644
@@ -392,6 +392,7 @@ static void i40iw_free_qp(struct i40iw_cqp_request *cqp_request, u32 num)
 
        i40iw_rem_pdusecount(iwqp->iwpd, iwdev);
        i40iw_free_qp_resources(iwdev, iwqp, qp_num);
+       i40iw_rem_devusecount(iwdev);
 }
 
 /**
@@ -458,6 +459,26 @@ enum i40iw_status_code i40iw_handle_cqp_op(struct i40iw_device *iwdev,
        return status;
 }
 
+/**
+ * i40iw_add_devusecount - add dev refcount
+ * @iwdev: dev for refcount
+ */
+void i40iw_add_devusecount(struct i40iw_device *iwdev)
+{
+       atomic64_inc(&iwdev->use_count);
+}
+
+/**
+ * i40iw_rem_devusecount - decrement refcount for dev
+ * @iwdev: device
+ */
+void i40iw_rem_devusecount(struct i40iw_device *iwdev)
+{
+       if (!atomic64_dec_and_test(&iwdev->use_count))
+               return;
+       wake_up(&iwdev->close_wq);
+}
+
 /**
  * i40iw_add_pdusecount - add pd refcount
  * @iwpd: pd for refcount
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 1c2f0a19bd6306a51678da255e6befe3e04af0ac..bc24086989e3b911ea8c82e845dbbef5025b71a2 100644
@@ -336,6 +336,9 @@ static struct ib_pd *i40iw_alloc_pd(struct ib_device *ibdev,
        u32 pd_id = 0;
        int err;
 
+       if (iwdev->closing)
+               return ERR_PTR(-ENODEV);
+
        err = i40iw_alloc_resource(iwdev, iwdev->allocated_pds,
                                   iwdev->max_pd, &pd_id, &iwdev->next_pd);
        if (err) {
@@ -601,6 +604,9 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
        struct i40iwarp_offload_info *iwarp_info;
        unsigned long flags;
 
+       if (iwdev->closing)
+               return ERR_PTR(-ENODEV);
+
        if (init_attr->create_flags)
                return ERR_PTR(-EINVAL);
        if (init_attr->cap.max_inline_data > I40IW_MAX_INLINE_DATA_SIZE)
@@ -776,6 +782,7 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd,
        iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
        iwdev->qp_table[qp_num] = iwqp;
        i40iw_add_pdusecount(iwqp->iwpd);
+       i40iw_add_devusecount(iwdev);
        if (ibpd->uobject && udata) {
                memset(&uresp, 0, sizeof(uresp));
                uresp.actual_sq_size = sq_size;
@@ -887,6 +894,11 @@ int i40iw_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
        spin_lock_irqsave(&iwqp->lock, flags);
 
        if (attr_mask & IB_QP_STATE) {
+               if (iwdev->closing && attr->qp_state != IB_QPS_ERR) {
+                       err = -EINVAL;
+                       goto exit;
+               }
+
                switch (attr->qp_state) {
                case IB_QPS_INIT:
                case IB_QPS_RTR:
@@ -1086,6 +1098,7 @@ static int i40iw_destroy_cq(struct ib_cq *ib_cq)
        cq_wq_destroy(iwdev, cq);
        cq_free_resources(iwdev, iwcq);
        kfree(iwcq);
+       i40iw_rem_devusecount(iwdev);
        return 0;
 }
 
@@ -1116,6 +1129,9 @@ static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
        int err_code;
        int entries = attr->cqe;
 
+       if (iwdev->closing)
+               return ERR_PTR(-ENODEV);
+
        if (entries > iwdev->max_cqe)
                return ERR_PTR(-EINVAL);
 
@@ -1233,6 +1249,7 @@ static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
                }
        }
 
+       i40iw_add_devusecount(iwdev);
        return (struct ib_cq *)iwcq;
 
 cq_destroy:
@@ -1270,6 +1287,7 @@ static void i40iw_free_stag(struct i40iw_device *iwdev, u32 stag)
 
        stag_idx = (stag & iwdev->mr_stagmask) >> I40IW_CQPSQ_STAG_IDX_SHIFT;
        i40iw_free_resource(iwdev, iwdev->allocated_mrs, stag_idx);
+       i40iw_rem_devusecount(iwdev);
 }
 
 /**
@@ -1300,6 +1318,7 @@ static u32 i40iw_create_stag(struct i40iw_device *iwdev)
                stag = stag_index << I40IW_CQPSQ_STAG_IDX_SHIFT;
                stag |= driver_key;
                stag += (u32)consumer_key;
+               i40iw_add_devusecount(iwdev);
        }
        return stag;
 }
@@ -1809,6 +1828,9 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
        int ret;
        int pg_shift;
 
+       if (iwdev->closing)
+               return ERR_PTR(-ENODEV);
+
        if (length > I40IW_MAX_MR_SIZE)
                return ERR_PTR(-EINVAL);
        region = ib_umem_get(pd->uobject->context, start, length, acc, 0);
@@ -2842,6 +2864,9 @@ void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev)
        i40iw_unregister_rdma_device(iwibdev);
        kfree(iwibdev->ibdev.iwcm);
        iwibdev->ibdev.iwcm = NULL;
+       wait_event_timeout(iwibdev->iwdev->close_wq,
+                          !atomic64_read(&iwibdev->iwdev->use_count),
+                          I40IW_EVENT_TIMEOUT);
        ib_dealloc_device(&iwibdev->ibdev);
 }