Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index d1e921816bfee3596c961e6671d87a84a3caa0ab..a1b3125f0a6eb3a312539a32414202cefc877df8 100644
@@ -77,12 +77,14 @@ struct mlx5_wqe_eth_pad {
 
 enum raw_qp_set_mask_map {
        MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID         = 1UL << 0,
+       MLX5_RAW_QP_RATE_LIMIT                  = 1UL << 1,
 };
 
 struct mlx5_modify_raw_qp_param {
        u16 operation;
 
        u32 set_mask; /* raw_qp_set_mask_map */
+       u32 rate_limit;
        u8 rq_q_ctr_id;
 };
 
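
Note: the new rate_limit field is paired with a MLX5_RAW_QP_RATE_LIMIT bit in
set_mask because 0 is a meaningful rate ("no limit"), so consumers must test
the mask rather than the value. A minimal standalone sketch of that pattern
(hypothetical names, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    enum set_mask_bits {
        SET_RQ_Q_CTR_ID = 1UL << 0,
        SET_RATE_LIMIT  = 1UL << 1,
    };

    struct modify_param {
        uint32_t set_mask;    /* which fields below carry a request */
        uint32_t rate_limit;  /* 0 is a legal value: "no limit" */
    };

    int main(void)
    {
        struct modify_param p = { 0 };

        p.rate_limit = 0;             /* explicitly request "unlimited" */
        p.set_mask  |= SET_RATE_LIMIT;

        /* The consumer keys off the mask bit, not the payload. */
        if (p.set_mask & SET_RATE_LIMIT)
            printf("apply rate limit %u (0 = unlimited)\n", p.rate_limit);
        return 0;
    }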
@@ -351,6 +353,29 @@ static int calc_send_wqe(struct ib_qp_init_attr *attr)
                return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
 }
 
+static int get_send_sge(struct ib_qp_init_attr *attr, int wqe_size)
+{
+       int max_sge;
+
+       if (attr->qp_type == IB_QPT_RC)
+               max_sge = (min_t(int, wqe_size, 512) -
+                          sizeof(struct mlx5_wqe_ctrl_seg) -
+                          sizeof(struct mlx5_wqe_raddr_seg)) /
+                       sizeof(struct mlx5_wqe_data_seg);
+       else if (attr->qp_type == IB_QPT_XRC_INI)
+               max_sge = (min_t(int, wqe_size, 512) -
+                          sizeof(struct mlx5_wqe_ctrl_seg) -
+                          sizeof(struct mlx5_wqe_xrc_seg) -
+                          sizeof(struct mlx5_wqe_raddr_seg)) /
+                       sizeof(struct mlx5_wqe_data_seg);
+       else
+               max_sge = (wqe_size - sq_overhead(attr)) /
+                       sizeof(struct mlx5_wqe_data_seg);
+
+       return min_t(int, max_sge, (wqe_size - sq_overhead(attr)) /
+                    sizeof(struct mlx5_wqe_data_seg));
+}
+
 static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
                        struct mlx5_ib_qp *qp)
 {
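
Note: get_send_sge() clamps the advertised SGE count to what fits in a WQE
after the mandatory segments for each QP type, with RC and XRC_INI further
capped at the first 512 bytes of the WQE. A standalone sketch of the RC
arithmetic (the 16-byte sizes are stand-ins for the mlx5 segment structs):

    #include <stdio.h>

    #define CTRL_SEG_SZ  16   /* stand-in for struct mlx5_wqe_ctrl_seg */
    #define RADDR_SEG_SZ 16   /* stand-in for struct mlx5_wqe_raddr_seg */
    #define DATA_SEG_SZ  16   /* stand-in for struct mlx5_wqe_data_seg */

    static int min_int(int a, int b) { return a < b ? a : b; }

    /* RC flavor: data segments that fit in the first min(wqe_size, 512)
     * bytes once the control and remote-address headers are paid for. */
    static int rc_max_sge(int wqe_size)
    {
        return (min_int(wqe_size, 512) - CTRL_SEG_SZ - RADDR_SEG_SZ) /
                DATA_SEG_SZ;
    }

    int main(void)
    {
        printf("%d\n", rc_max_sge(192));  /* (192 - 32) / 16 = 10 */
        printf("%d\n", rc_max_sge(1024)); /* capped: (512 - 32) / 16 = 30 */
        return 0;
    }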
@@ -381,13 +406,18 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
        wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
        qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
        if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
-               mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
+               mlx5_ib_dbg(dev, "send queue size (%d * %d / %d -> %d) exceeds limit (%d)\n",
+                           attr->cap.max_send_wr, wqe_size, MLX5_SEND_WQE_BB,
                            qp->sq.wqe_cnt,
                            1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
                return -ENOMEM;
        }
        qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
-       qp->sq.max_gs = attr->cap.max_send_sge;
+       qp->sq.max_gs = get_send_sge(attr, wqe_size);
+       if (qp->sq.max_gs < attr->cap.max_send_sge)
+               return -ENOMEM;
+
+       attr->cap.max_send_sge = qp->sq.max_gs;
        qp->sq.max_post = wq_size / wqe_size;
        attr->cap.max_send_wr = qp->sq.max_post;
 
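
Note: the reworked mlx5_ib_dbg() now prints the operands of the sizing
calculation, which is wq_size = roundup_pow_of_two(max_send_wr * wqe_size)
split into send basic blocks. A standalone sketch of that arithmetic
(SEND_WQE_BB mirrors MLX5_SEND_WQE_BB, assumed 64 bytes here):

    #include <stdio.h>

    #define SEND_WQE_BB 64

    static unsigned int roundup_pow_of_two_u(unsigned int v)
    {
        unsigned int r = 1;

        while (r < v)
            r <<= 1;
        return r;
    }

    int main(void)
    {
        unsigned int max_send_wr = 100, wqe_size = 192, log_max_qp_sz = 16;
        unsigned int wq_size = roundup_pow_of_two_u(max_send_wr * wqe_size);
        unsigned int wqe_cnt = wq_size / SEND_WQE_BB;

        /* 100 * 192 = 19200 -> 32768 bytes -> 512 basic blocks */
        printf("%u * %u / %u -> %u (limit %u)\n", max_send_wr, wqe_size,
               SEND_WQE_BB, wqe_cnt, 1u << log_max_qp_sz);
        return wqe_cnt > (1u << log_max_qp_sz); /* nonzero = would -ENOMEM */
    }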
@@ -647,7 +677,7 @@ static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev,
                return PTR_ERR(*umem);
        }
 
-       mlx5_ib_cont_pages(*umem, addr, npages, page_shift, ncont, NULL);
+       mlx5_ib_cont_pages(*umem, addr, 0, npages, page_shift, ncont, NULL);
 
        err = mlx5_ib_get_buf_offset(addr, *page_shift, offset);
        if (err) {
@@ -700,7 +730,7 @@ static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
                return err;
        }
 
-       mlx5_ib_cont_pages(rwq->umem, ucmd->buf_addr, &npages, &page_shift,
+       mlx5_ib_cont_pages(rwq->umem, ucmd->buf_addr, 0, &npages, &page_shift,
                           &ncont, NULL);
        err = mlx5_ib_get_buf_offset(ucmd->buf_addr, page_shift,
                                     &rwq->rq_page_offset);
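
Note: both call sites gain a new third argument (0 here); the surrounding
logic is unchanged. mlx5_ib_cont_pages() scans the pinned umem for the
largest page shift at which the buffer tiles into aligned contiguous blocks.
A rough standalone model of that idea (best_page_shift() is illustrative,
not the driver's function):

    #include <stdint.h>
    #include <stdio.h>

    /* Given the bus addresses of the 4K pages backing a buffer, find the
     * largest shift at which they form one run of aligned blocks. */
    static int best_page_shift(const uint64_t *page_addr, int npages)
    {
        int shift = 12; /* start at 4K */

        for (;;) {
            int next = shift + 1;
            uint64_t block = 1ULL << next;
            int i, ok = page_addr[0] % block == 0;

            for (i = 1; ok && i < npages; i++)
                ok = page_addr[i] == page_addr[0] + (uint64_t)i * 4096;
            if (!ok || (uint64_t)npages * 4096 < block)
                break;
            shift = next;
        }
        return shift;
    }

    int main(void)
    {
        uint64_t pages[4] = { 0x10000, 0x11000, 0x12000, 0x13000 };

        printf("%d\n", best_page_shift(pages, 4)); /* 14: one 16K block */
        return 0;
    }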
@@ -2442,8 +2472,14 @@ out:
 }
 
 static int modify_raw_packet_qp_sq(struct mlx5_core_dev *dev,
-                                  struct mlx5_ib_sq *sq, int new_state)
+                                  struct mlx5_ib_sq *sq,
+                                  int new_state,
+                                  const struct mlx5_modify_raw_qp_param *raw_qp_param)
 {
+       struct mlx5_ib_qp *ibqp = sq->base.container_mibqp;
+       u32 old_rate = ibqp->rate_limit;
+       u32 new_rate = old_rate;
+       u16 rl_index = 0;
        void *in;
        void *sqc;
        int inlen;
@@ -2459,10 +2495,44 @@ static int modify_raw_packet_qp_sq(struct mlx5_core_dev *dev,
        sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
        MLX5_SET(sqc, sqc, state, new_state);
 
+       if (raw_qp_param->set_mask & MLX5_RAW_QP_RATE_LIMIT) {
+               if (new_state != MLX5_SQC_STATE_RDY)
+                       pr_warn("%s: Rate limit can only be changed when SQ is moving to RDY\n",
+                               __func__);
+               else
+                       new_rate = raw_qp_param->rate_limit;
+       }
+
+       if (old_rate != new_rate) {
+               if (new_rate) {
+                       err = mlx5_rl_add_rate(dev, new_rate, &rl_index);
+                       if (err) {
+                               pr_err("Failed configuring rate %u: %d\n",
+                                      new_rate, err);
+                               goto out;
+                       }
+               }
+
+               MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
+               MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index);
+       }
+
        err = mlx5_core_modify_sq(dev, sq->base.mqp.qpn, in, inlen);
-       if (err)
+       if (err) {
+               /* Remove new rate from table if failed */
+               if (new_rate && old_rate != new_rate)
+                       mlx5_rl_remove_rate(dev, new_rate);
                goto out;
+       }
+
+       /* Only remove the old rate after new rate was set */
+       if ((old_rate && old_rate != new_rate) ||
+           new_state != MLX5_SQC_STATE_RDY)
+               mlx5_rl_remove_rate(dev, old_rate);
 
+       ibqp->rate_limit = new_rate;
        sq->state = new_state;
 
 out:
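
Note: the ordering above is deliberate: take a reference on the new rate in
the device rate table, point the SQ at it, and only then drop the old rate's
reference, so the SQ never references a released entry; if the modify fails,
the freshly added rate is backed out. A standalone sketch of that
reference-counted pattern (simplified table, hypothetical names):

    #include <stdio.h>

    #define TABLE_SZ 4

    struct rl_entry { unsigned int rate, refcount; };
    static struct rl_entry table[TABLE_SZ];

    /* Take (or create) a reference on a rate; 0 on success. */
    static int rl_add_rate(unsigned int rate, unsigned int *index)
    {
        int i, free_slot = -1;

        for (i = 0; i < TABLE_SZ; i++) {
            if (table[i].refcount && table[i].rate == rate) {
                table[i].refcount++;
                *index = i;
                return 0;
            }
            if (!table[i].refcount && free_slot < 0)
                free_slot = i;
        }
        if (free_slot < 0)
            return -1; /* table full */
        table[free_slot] = (struct rl_entry){ .rate = rate, .refcount = 1 };
        *index = free_slot;
        return 0;
    }

    static void rl_remove_rate(unsigned int rate)
    {
        int i;

        if (!rate) /* rate 0 holds no table entry */
            return;
        for (i = 0; i < TABLE_SZ; i++)
            if (table[i].refcount && table[i].rate == rate)
                table[i].refcount--;
    }

    int main(void)
    {
        unsigned int old_rate = 500, new_rate = 1000, idx;

        if (rl_add_rate(new_rate, &idx))        /* 1. reference the new rate */
            return 1;
        printf("modify SQ -> index %u\n", idx); /* 2. point hardware at it */
        rl_remove_rate(old_rate);               /* 3. only now drop the old */
        return 0;
    }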
@@ -2477,6 +2547,8 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
        struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
        struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
        struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
+       int modify_rq = !!qp->rq.wqe_cnt;
+       int modify_sq = !!qp->sq.wqe_cnt;
        int rq_state;
        int sq_state;
        int err;
@@ -2494,10 +2566,18 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
                rq_state = MLX5_RQC_STATE_RST;
                sq_state = MLX5_SQC_STATE_RST;
                break;
-       case MLX5_CMD_OP_INIT2INIT_QP:
-       case MLX5_CMD_OP_INIT2RTR_QP:
        case MLX5_CMD_OP_RTR2RTS_QP:
        case MLX5_CMD_OP_RTS2RTS_QP:
+               if (raw_qp_param->set_mask == MLX5_RAW_QP_RATE_LIMIT) {
+                       modify_rq = 0;
+                       sq_state = sq->state;
+               } else {
+                       return raw_qp_param->set_mask ? -EINVAL : 0;
+               }
+               break;
+       case MLX5_CMD_OP_INIT2INIT_QP:
+       case MLX5_CMD_OP_INIT2RTR_QP:
                if (raw_qp_param->set_mask)
                        return -EINVAL;
                else
@@ -2507,13 +2587,13 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
                return -EINVAL;
        }
 
-       if (qp->rq.wqe_cnt) {
-               err = modify_raw_packet_qp_rq(dev, rq, rq_state, raw_qp_param);
+       if (modify_rq) {
+               err = modify_raw_packet_qp_rq(dev, rq, rq_state, raw_qp_param);
                if (err)
                        return err;
        }
 
-       if (qp->sq.wqe_cnt) {
+       if (modify_sq) {
                if (tx_affinity) {
                        err = modify_raw_packet_tx_affinity(dev->mdev, sq,
                                                            tx_affinity);
@@ -2521,7 +2601,7 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
                                return err;
                }
 
-               return modify_raw_packet_qp_sq(dev->mdev, sq, sq_state);
+               return modify_raw_packet_qp_sq(dev->mdev, sq, sq_state, raw_qp_param);
        }
 
        return 0;
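
Note: with modify_rq/modify_sq precomputed, RTR2RTS and RTS2RTS can now
accept a rate-limit-only modify: the RQ is skipped and the SQ keeps its
current state while only packet pacing changes; any other set_mask bit on
those transitions is still rejected. A condensed sketch of the dispatch
(stub types, not the driver's):

    #include <stdio.h>

    enum op { OP_RTR2RTS, OP_RTS2RTS, OP_INIT2RTR, OP_2RST };

    #define MASK_RATE_LIMIT (1u << 1)

    /* Decide which queues to touch; returns -1 on an invalid mask. */
    static int plan_modify(enum op op, unsigned int set_mask,
                           int has_rq, int has_sq, int *do_rq, int *do_sq)
    {
        *do_rq = has_rq;
        *do_sq = has_sq;

        switch (op) {
        case OP_RTR2RTS:
        case OP_RTS2RTS:
            if (set_mask == MASK_RATE_LIMIT) {
                *do_rq = 0;          /* rate limit touches the SQ only */
                return 0;
            }
            if (set_mask)
                return -1;           /* nothing else modifiable here */
            *do_rq = *do_sq = 0;     /* empty mask: nothing to do */
            return 0;
        case OP_INIT2RTR:
            return set_mask ? -1 : 0;
        case OP_2RST:
            return 0;
        }
        return -1;
    }

    int main(void)
    {
        int do_rq, do_sq;

        if (!plan_modify(OP_RTS2RTS, MASK_RATE_LIMIT, 1, 1, &do_rq, &do_sq))
            printf("rq=%d sq=%d\n", do_rq, do_sq); /* rq=0 sq=1 */
        return 0;
    }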
@@ -2577,7 +2657,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
        struct mlx5_ib_port *mibport = NULL;
        enum mlx5_qp_state mlx5_cur, mlx5_new;
        enum mlx5_qp_optpar optpar;
-       int sqd_event;
        int mlx5_st;
        int err;
        u16 op;
@@ -2724,12 +2803,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
        if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
                context->db_rec_addr = cpu_to_be64(qp->db.dma);
 
-       if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD  &&
-           attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
-               sqd_event = 1;
-       else
-               sqd_event = 0;
-
        if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
                u8 port_num = (attr_mask & IB_QP_PORT ? attr->port_num :
                               qp->port) - 1;
@@ -2776,6 +2849,12 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
                        raw_qp_param.rq_q_ctr_id = mibport->q_cnt_id;
                        raw_qp_param.set_mask |= MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID;
                }
+
+               if (attr_mask & IB_QP_RATE_LIMIT) {
+                       raw_qp_param.rate_limit = attr->rate_limit;
+                       raw_qp_param.set_mask |= MLX5_RAW_QP_RATE_LIMIT;
+               }
+
                err = modify_raw_packet_qp(dev, qp, &raw_qp_param, tx_affinity);
        } else {
                err = mlx5_core_qp_modify(dev->mdev, op, optpar, context,
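
Note: this is the producer side of the set_mask pattern: the verbs-level
IB_QP_RATE_LIMIT bit and attr->rate_limit are translated into the
driver-internal parameter block. A standalone sketch of that translation
(the bit values are hypothetical stand-ins):

    #include <stdio.h>

    #define IB_QP_RATE_LIMIT_BIT (1u << 25) /* stand-in for IB_QP_RATE_LIMIT */
    #define RAW_QP_RATE_LIMIT    (1u << 1)

    struct qp_attr   { unsigned int rate_limit; };
    struct raw_param { unsigned int set_mask, rate_limit; };

    /* Forward the attribute only when its mask bit was supplied. */
    static void fill_raw_param(struct raw_param *p, unsigned int attr_mask,
                               const struct qp_attr *attr)
    {
        if (attr_mask & IB_QP_RATE_LIMIT_BIT) {
            p->rate_limit = attr->rate_limit;
            p->set_mask  |= RAW_QP_RATE_LIMIT;
        }
    }

    int main(void)
    {
        struct qp_attr attr = { .rate_limit = 1000 };
        struct raw_param p = { 0 };

        fill_raw_param(&p, IB_QP_RATE_LIMIT_BIT, &attr);
        printf("mask=%#x rate=%u\n", p.set_mask, p.rate_limit);
        return 0;
    }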
@@ -3067,10 +3146,10 @@ static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
 {
        memset(umr, 0, sizeof(*umr));
        umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
-       umr->flags = 1 << 7;
+       umr->flags = MLX5_UMR_INLINE;
 }
 
-static __be64 get_umr_reg_mr_mask(void)
+static __be64 get_umr_reg_mr_mask(int atomic)
 {
        u64 result;
 
@@ -3083,9 +3162,11 @@ static __be64 get_umr_reg_mr_mask(void)
                 MLX5_MKEY_MASK_KEY             |
                 MLX5_MKEY_MASK_RR              |
                 MLX5_MKEY_MASK_RW              |
-                MLX5_MKEY_MASK_A               |
                 MLX5_MKEY_MASK_FREE;
 
+       if (atomic)
+               result |= MLX5_MKEY_MASK_A;
+
        return cpu_to_be64(result);
 }
 
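
Note: making MLX5_MKEY_MASK_A conditional means the UMR no longer asks to
update the atomic-access bit on devices that do not report the atomic
capability (a hedged reading of the change; the caller below passes the
capability in). A standalone sketch of the conditional mask build with
stand-in bit values:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in mask bits; the real ones live in the mlx5 headers. */
    #define MKEY_MASK_LR   (1ULL << 0)
    #define MKEY_MASK_LW   (1ULL << 1)
    #define MKEY_MASK_RR   (1ULL << 2)
    #define MKEY_MASK_RW   (1ULL << 3)
    #define MKEY_MASK_A    (1ULL << 4)
    #define MKEY_MASK_FREE (1ULL << 5)

    /* Mirror of the reworked get_umr_reg_mr_mask(): include the atomic
     * bit only when the device reports the atomic capability. */
    static uint64_t reg_mr_mask(int atomic)
    {
        uint64_t m = MKEY_MASK_LR | MKEY_MASK_LW |
                     MKEY_MASK_RR | MKEY_MASK_RW | MKEY_MASK_FREE;

        if (atomic)
            m |= MKEY_MASK_A;
        return m;
    }

    int main(void)
    {
        printf("no atomic: %#llx\n", (unsigned long long)reg_mr_mask(0));
        printf("atomic:    %#llx\n", (unsigned long long)reg_mr_mask(1));
        return 0;
    }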
@@ -3146,7 +3227,7 @@ static __be64 get_umr_update_pd_mask(void)
 }
 
 static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
-                               struct ib_send_wr *wr)
+                               struct ib_send_wr *wr, int atomic)
 {
        struct mlx5_umr_wr *umrwr = umr_wr(wr);
 
@@ -3171,7 +3252,7 @@ static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
                if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD)
                        umr->mkey_mask |= get_umr_update_pd_mask();
                if (!umr->mkey_mask)
-                       umr->mkey_mask = get_umr_reg_mr_mask();
+                       umr->mkey_mask = get_umr_reg_mr_mask(atomic);
        } else {
                umr->mkey_mask = get_umr_unreg_mr_mask();
        }
@@ -4024,7 +4105,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                        }
                        qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
                        ctrl->imm = cpu_to_be32(umr_wr(wr)->mkey);
-                       set_reg_umr_segment(seg, wr);
+                       set_reg_umr_segment(seg, wr, !!(MLX5_CAP_GEN(mdev, atomic)));
                        seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
                        size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
                        if (unlikely((seg == qend)))
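
Note: the !! in the new call normalizes the multi-bit capability field read
by MLX5_CAP_GEN() to a strict 0/1 before it is passed as the int atomic
flag. A one-line illustration:

    #include <stdio.h>

    int main(void)
    {
        unsigned int cap_field = 0x4; /* any nonzero capability encoding */

        printf("%d %d\n", !!cap_field, !!0u); /* prints: 1 0 */
        return 0;
    }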