bnx2x: Add support in PF driver for RSC
author		Michal Kalderon <michals@broadcom.com>
		Wed, 12 Feb 2014 16:19:53 +0000 (18:19 +0200)
committer	David S. Miller <davem@davemloft.net>
		Thu, 13 Feb 2014 00:15:41 +0000 (19:15 -0500)
This provides PF-side support for VFs assigned to a VM running Windows
2012 with the RSC (Receive Segment Coalescing) feature enabled.

Signed-off-by: Michal Kalderon <michals@broadcom.com>
Signed-off-by: Yuval Mintz <yuvalmin@broadcom.com>
Signed-off-by: Ariel Elior <ariele@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h

diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index ae91e8f436224a8d6a74587630bf4902d1ab93d1..c871d19ab6e1131b5622cef8dd40a38c624b27ec 100644
@@ -1270,6 +1270,7 @@ struct bnx2x_slowpath {
        union {
                struct client_init_ramrod_data  init_data;
                struct client_update_ramrod_data update_data;
+               struct tpa_update_ramrod_data tpa_data;
        } q_rdata;
 
        union {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 38f04018110b063872e6b78b03a20063a7553669..56a7d3f2128a80df69ce187a2960defa0bfc058a 100644
@@ -1814,6 +1814,11 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
                drv_cmd = BNX2X_Q_CMD_EMPTY;
                break;
 
+       case (RAMROD_CMD_ID_ETH_TPA_UPDATE):
+               DP(BNX2X_MSG_SP, "got tpa update ramrod CID=%d\n", cid);
+               drv_cmd = BNX2X_Q_CMD_UPDATE_TPA;
+               break;
+
        default:
                BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
                          command, fp->index);
@@ -3644,10 +3649,18 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
                        cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
                                    HW_CID(bp, cid));
 
-       type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
-
-       type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
-                SPE_HDR_FUNCTION_ID);
+       /* In some cases (mainly SR-IOV flows), the type may already
+        * contain the func-id, so add it here only if it is not
+        * already set.
+        */
+       if (!(cmd_type & SPE_HDR_FUNCTION_ID)) {
+               type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) &
+                       SPE_HDR_CONN_TYPE;
+               type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
+                        SPE_HDR_FUNCTION_ID);
+       } else {
+               type = cmd_type;
+       }
 
        spe->hdr.type = cpu_to_le16(type);
 
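The hunk above changes bnx2x_sp_post() so that a caller can pre-encode a
function id in cmd_type and have it honored, instead of always being
overwritten with the PF's own id. A minimal standalone sketch of the type
encoding follows; the macro values are assumptions modeled on bnx2x_hsi.h
(connection type in bits 0..7, function id in bits 8..15), not copied
verbatim from it:

    #include <stdint.h>

    /* Assumed bit layout of the SPE header 'type' field (see bnx2x_hsi.h) */
    #define SPE_HDR_CONN_TYPE_SHIFT    0
    #define SPE_HDR_CONN_TYPE          (0xffU << SPE_HDR_CONN_TYPE_SHIFT)
    #define SPE_HDR_FUNCTION_ID_SHIFT  8
    #define SPE_HDR_FUNCTION_ID        (0xffU << SPE_HDR_FUNCTION_ID_SHIFT)

    static uint16_t spe_type(uint16_t cmd_type, uint8_t pf_func_id)
    {
            /* A caller (e.g. an SR-IOV flow) already encoded a func-id:
             * use the type verbatim.
             */
            if (cmd_type & SPE_HDR_FUNCTION_ID)
                    return cmd_type;

            /* Otherwise stamp the PF's own function id, as before this patch */
            return ((cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE) |
                   (uint16_t)(pf_func_id << SPE_HDR_FUNCTION_ID_SHIFT);
    }

This is exactly the check bnx2x_q_send_update_tpa() relies on further down,
where it builds the type with the VF's func-id before calling bnx2x_sp_post().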
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 0fb6ff2ac8e3738372bcb95f4278ef600c2bd76b..270ba195a56bfc1984b2830fa01f720242c5f9e9 100644
@@ -2277,11 +2277,11 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
                         data->header.rule_cnt, p->rx_accept_flags,
                         p->tx_accept_flags);
 
-       /* No need for an explicit memory barrier here as long we would
-        * need to ensure the ordering of writing to the SPQ element
+       /* No need for an explicit memory barrier here as long as we
+        * ensure the ordering of writing to the SPQ element
         * and updating of the SPQ producer which involves a memory
-        * read and we will have to put a full memory barrier there
-        * (inside bnx2x_sp_post()).
+        * read. If the memory read is removed we will have to put a
+        * full memory barrier there (inside bnx2x_sp_post()).
         */
 
        /* Send a ramrod */
@@ -2982,11 +2982,11 @@ static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
                raw->clear_pending(raw);
                return 0;
        } else {
-               /* No need for an explicit memory barrier here as long we would
-                * need to ensure the ordering of writing to the SPQ element
+               /* No need for an explicit memory barrier here as long as we
+                * ensure the ordering of writing to the SPQ element
                 * and updating of the SPQ producer which involves a memory
-                * read and we will have to put a full memory barrier there
-                * (inside bnx2x_sp_post()).
+                * read. If the memory read is removed we will have to put a
+                * full memory barrier there (inside bnx2x_sp_post()).
                 */
 
                /* Send a ramrod */
@@ -3466,11 +3466,11 @@ static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
                raw->clear_pending(raw);
                return 0;
        } else {
-               /* No need for an explicit memory barrier here as long we would
-                * need to ensure the ordering of writing to the SPQ element
+               /* No need for an explicit memory barrier here as long as we
+                * ensure the ordering of writing to the SPQ element
                 * and updating of the SPQ producer which involves a memory
-                * read and we will have to put a full memory barrier there
-                * (inside bnx2x_sp_post()).
+                * read. If the memory read is removed we will have to put a
+                * full memory barrier there (inside bnx2x_sp_post()).
                 */
 
                /* Send a ramrod */
@@ -4091,11 +4091,11 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
                data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
        }
 
-       /* No need for an explicit memory barrier here as long we would
-        * need to ensure the ordering of writing to the SPQ element
+       /* No need for an explicit memory barrier here as long as we
+        * ensure the ordering of writing to the SPQ element
         * and updating of the SPQ producer which involves a memory
-        * read and we will have to put a full memory barrier there
-        * (inside bnx2x_sp_post()).
+        * read. If the memory read is removed we will have to put a
+        * full memory barrier there (inside bnx2x_sp_post()).
         */
 
        /* Send a ramrod */
@@ -4587,13 +4587,12 @@ static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
        /* Fill the ramrod data */
        bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
 
-       /* No need for an explicit memory barrier here as long we would
-        * need to ensure the ordering of writing to the SPQ element
+       /* No need for an explicit memory barrier here as long as we
+        * ensure the ordering of writing to the SPQ element
         * and updating of the SPQ producer which involves a memory
-        * read and we will have to put a full memory barrier there
-        * (inside bnx2x_sp_post()).
+        * read. If the memory read is removed we will have to put a
+        * full memory barrier there (inside bnx2x_sp_post()).
         */
-
        return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
                             U64_HI(data_mapping),
                             U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4615,13 +4614,12 @@ static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
        bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
        bnx2x_q_fill_setup_data_e2(bp, params, rdata);
 
-       /* No need for an explicit memory barrier here as long we would
-        * need to ensure the ordering of writing to the SPQ element
+       /* No need for an explicit memory barrier here as long as we
+        * ensure the ordering of writing to the SPQ element
         * and updating of the SPQ producer which involves a memory
-        * read and we will have to put a full memory barrier there
-        * (inside bnx2x_sp_post()).
+        * read. If the memory read is removed we will have to put a
+        * full memory barrier there (inside bnx2x_sp_post()).
         */
-
        return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
                             U64_HI(data_mapping),
                             U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4659,13 +4657,12 @@ static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
                         o->cids[cid_index], rdata->general.client_id,
                         rdata->general.sp_client_id, rdata->general.cos);
 
-       /* No need for an explicit memory barrier here as long we would
-        * need to ensure the ordering of writing to the SPQ element
+       /* No need for an explicit memory barrier here as long as we
+        * ensure the ordering of writing to the SPQ element
         * and updating of the SPQ producer which involves a memory
-        * read and we will have to put a full memory barrier there
-        * (inside bnx2x_sp_post()).
+        * read. If the memory read is removed we will have to put a
+        * full memory barrier there (inside bnx2x_sp_post()).
         */
-
        return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
                             U64_HI(data_mapping),
                             U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4760,13 +4757,12 @@ static inline int bnx2x_q_send_update(struct bnx2x *bp,
        /* Fill the ramrod data */
        bnx2x_q_fill_update_data(bp, o, update_params, rdata);
 
-       /* No need for an explicit memory barrier here as long we would
-        * need to ensure the ordering of writing to the SPQ element
+       /* No need for an explicit memory barrier here as long as we
+        * ensure the ordering of writing to the SPQ element
         * and updating of the SPQ producer which involves a memory
-        * read and we will have to put a full memory barrier there
-        * (inside bnx2x_sp_post()).
+        * read. If the memory read is removed we will have to put a
+        * full memory barrier there (inside bnx2x_sp_post()).
         */
-
        return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
                             o->cids[cid_index], U64_HI(data_mapping),
                             U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4813,11 +4809,62 @@ static inline int bnx2x_q_send_activate(struct bnx2x *bp,
        return bnx2x_q_send_update(bp, params);
 }
 
+static void bnx2x_q_fill_update_tpa_data(struct bnx2x *bp,
+                               struct bnx2x_queue_sp_obj *obj,
+                               struct bnx2x_queue_update_tpa_params *params,
+                               struct tpa_update_ramrod_data *data)
+{
+       data->client_id = obj->cl_id;
+       data->complete_on_both_clients = params->complete_on_both_clients;
+       data->dont_verify_rings_pause_thr_flg =
+               params->dont_verify_thr;
+       data->max_agg_size = cpu_to_le16(params->max_agg_sz);
+       data->max_sges_for_packet = params->max_sges_pkt;
+       data->max_tpa_queues = params->max_tpa_queues;
+       data->sge_buff_size = cpu_to_le16(params->sge_buff_sz);
+       data->sge_page_base_hi = cpu_to_le32(U64_HI(params->sge_map));
+       data->sge_page_base_lo = cpu_to_le32(U64_LO(params->sge_map));
+       data->sge_pause_thr_high = cpu_to_le16(params->sge_pause_thr_high);
+       data->sge_pause_thr_low = cpu_to_le16(params->sge_pause_thr_low);
+       data->tpa_mode = params->tpa_mode;
+       data->update_ipv4 = params->update_ipv4;
+       data->update_ipv6 = params->update_ipv6;
+}
+
 static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
                                        struct bnx2x_queue_state_params *params)
 {
-       /* TODO: Not implemented yet. */
-       return -1;
+       struct bnx2x_queue_sp_obj *o = params->q_obj;
+       struct tpa_update_ramrod_data *rdata =
+               (struct tpa_update_ramrod_data *)o->rdata;
+       dma_addr_t data_mapping = o->rdata_mapping;
+       struct bnx2x_queue_update_tpa_params *update_tpa_params =
+               &params->params.update_tpa;
+       u16 type;
+
+       /* Clear the ramrod data */
+       memset(rdata, 0, sizeof(*rdata));
+
+       /* Fill the ramrod data */
+       bnx2x_q_fill_update_tpa_data(bp, o, update_tpa_params, rdata);
+
+       /* Encode the function id inside the type so that the sp-post
+        * function doesn't automatically add the PF func-id; this is
+        * required for operations done by PFs on behalf of their VFs.
+        */
+       type = ETH_CONNECTION_TYPE |
+               ((o->func_id) << SPE_HDR_FUNCTION_ID_SHIFT);
+
+       /* No need for an explicit memory barrier here as long as we
+        * ensure the ordering of writing to the SPQ element
+        * and updating of the SPQ producer which involves a memory
+        * read. If the memory read is removed we will have to put a
+        * full memory barrier there (inside bnx2x_sp_post()).
+        */
+       return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TPA_UPDATE,
+                            o->cids[BNX2X_PRIMARY_CID_INDEX],
+                            U64_HI(data_mapping),
+                            U64_LO(data_mapping), type);
 }
 
 static inline int bnx2x_q_send_halt(struct bnx2x *bp,
@@ -5647,6 +5694,12 @@ static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
        rdata->tx_switch_suspend = switch_update_params->suspend;
        rdata->echo = SWITCH_UPDATE;
 
+       /* No need for an explicit memory barrier here as long as we
+        * ensure the ordering of writing to the SPQ element
+        * and updating of the SPQ producer which involves a memory
+        * read. If the memory read is removed we will have to put a
+        * full memory barrier there (inside bnx2x_sp_post()).
+        */
        return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
                             U64_HI(data_mapping),
                             U64_LO(data_mapping), NONE_CONNECTION_TYPE);
@@ -5674,11 +5727,11 @@ static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
        rdata->allowed_priorities = afex_update_params->allowed_priorities;
        rdata->echo = AFEX_UPDATE;
 
-       /*  No need for an explicit memory barrier here as long we would
-        *  need to ensure the ordering of writing to the SPQ element
-        *  and updating of the SPQ producer which involves a memory
-        *  read and we will have to put a full memory barrier there
-        *  (inside bnx2x_sp_post()).
+       /* No need for an explicit memory barrier here as long as we
+        * ensure the ordering of writing to the SPQ element
+        * and updating of the SPQ producer which involves a memory
+        * read. If the memory read is removed we will have to put a
+        * full memory barrier there (inside bnx2x_sp_post()).
         */
        DP(BNX2X_MSG_SP,
           "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
@@ -5763,6 +5816,12 @@ static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
                rdata->traffic_type_to_priority_cos[i] =
                        tx_start_params->traffic_type_to_priority_cos[i];
 
+       /* No need for an explicit memory barrier here as long as we
+        * ensure the ordering of writing to the SPQ element
+        * and updating of the SPQ producer which involves a memory
+        * read. If the memory read is removed we will have to put a
+        * full memory barrier there (inside bnx2x_sp_post()).
+        */
        return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
                             U64_HI(data_mapping),
                             U64_LO(data_mapping), NONE_CONNECTION_TYPE);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 00d7f214a40a2caf2619fac0343ad3798b4bdaed..f7af21fc8ecc7fc3859bbbe4816131950c4042f5 100644
@@ -893,6 +893,24 @@ struct bnx2x_queue_update_params {
        u8              cid_index;
 };
 
+struct bnx2x_queue_update_tpa_params {
+       dma_addr_t sge_map;
+       u8 update_ipv4;
+       u8 update_ipv6;
+       u8 max_tpa_queues;
+       u8 max_sges_pkt;
+       u8 complete_on_both_clients;
+       u8 dont_verify_thr;
+       u8 tpa_mode;
+       u8 _pad;
+
+       u16 sge_buff_sz;
+       u16 max_agg_sz;
+
+       u16 sge_pause_thr_low;
+       u16 sge_pause_thr_high;
+};
+
 struct rxq_pause_params {
        u16             bd_th_lo;
        u16             bd_th_hi;
@@ -987,6 +1005,7 @@ struct bnx2x_queue_state_params {
        /* Params according to the current command */
        union {
                struct bnx2x_queue_update_params        update;
+               struct bnx2x_queue_update_tpa_params    update_tpa;
                struct bnx2x_queue_setup_params         setup;
                struct bnx2x_queue_init_params          init;
                struct bnx2x_queue_setup_tx_only_params tx_only;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 5c4980c664174ef3b76fba35a48b699a6fc6196f..a4a3d7e04df9ecc95aa6bbae421a96213d38edeb 100644
@@ -176,6 +176,11 @@ enum bnx2x_vfop_rss_state {
           BNX2X_VFOP_RSS_DONE
 };
 
+enum bnx2x_vfop_tpa_state {
+          BNX2X_VFOP_TPA_CONFIG,
+          BNX2X_VFOP_TPA_DONE
+};
+
 #define bnx2x_vfop_reset_wq(vf)        atomic_set(&vf->op_in_progress, 0)
 
 void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
@@ -3047,6 +3052,83 @@ int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
        return -ENOMEM;
 }
 
+/* VFOP tpa update, send update on all queues */
+static void bnx2x_vfop_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+       struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+       struct bnx2x_vfop_args_tpa *tpa_args = &vfop->args.tpa;
+       enum bnx2x_vfop_tpa_state state = vfop->state;
+
+       bnx2x_vfop_reset_wq(vf);
+
+       if (vfop->rc < 0)
+               goto op_err;
+
+       DP(BNX2X_MSG_IOV, "vf[%d:%d] STATE: %d\n",
+          vf->abs_vfid, tpa_args->qid,
+          state);
+
+       switch (state) {
+       case BNX2X_VFOP_TPA_CONFIG:
+
+               if (tpa_args->qid < vf_rxq_count(vf)) {
+                       struct bnx2x_queue_state_params *qstate =
+                               &vf->op_params.qstate;
+
+                       qstate->q_obj = &bnx2x_vfq(vf, tpa_args->qid, sp_obj);
+
+                       /* The only thing that changes for the ramrod params
+                        * between calls is the sge_map
+                        */
+                       qstate->params.update_tpa.sge_map =
+                               tpa_args->sge_map[tpa_args->qid];
+
+                       DP(BNX2X_MSG_IOV, "sge_addr[%d] %08x:%08x\n",
+                          tpa_args->qid,
+                          U64_HI(qstate->params.update_tpa.sge_map),
+                          U64_LO(qstate->params.update_tpa.sge_map));
+                       qstate->cmd = BNX2X_Q_CMD_UPDATE_TPA;
+                       vfop->rc = bnx2x_queue_state_change(bp, qstate);
+
+                       tpa_args->qid++;
+                       bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+               }
+               vfop->state = BNX2X_VFOP_TPA_DONE;
+               vfop->rc = 0;
+               bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+op_err:
+               BNX2X_ERR("TPA update error: rc %d\n", vfop->rc);
+op_done:
+       case BNX2X_VFOP_TPA_DONE:
+               bnx2x_vfop_end(bp, vf, vfop);
+               return;
+       default:
+               bnx2x_vfop_default(state);
+       }
+op_pending:
+       return;
+}
+
+int bnx2x_vfop_tpa_cmd(struct bnx2x *bp,
+                       struct bnx2x_virtf *vf,
+                       struct bnx2x_vfop_cmd *cmd,
+                       struct vfpf_tpa_tlv *tpa_tlv)
+{
+       struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+       if (vfop) {
+               vfop->args.qx.qid = 0; /* loop index, aliases args.tpa.qid */
+               memcpy(&vfop->args.tpa.sge_map,
+                      tpa_tlv->tpa_client_info.sge_addr,
+                      sizeof(vfop->args.tpa.sge_map));
+               bnx2x_vfop_opset(BNX2X_VFOP_TPA_CONFIG,
+                                bnx2x_vfop_tpa, cmd->done);
+               return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_tpa,
+                                            cmd->block);
+       }
+       return -ENOMEM;
+}
+
 /* VF release ~ VF close + VF release-resources
  * Release is the ultimate SW shutdown and is called whenever an
  * irrecoverable error is encountered.
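The vfop added above is an asynchronous state machine: each pass through
BNX2X_VFOP_TPA_CONFIG posts one TPA_UPDATE ramrod and requeues the op via
bnx2x_vfop_finalize(..., VFOP_CONT), advancing qid until every VF rx queue
has been updated. As a reading aid, a hypothetical synchronous equivalent
(illustration only, not driver API; the real code is completion-driven and
must not block like this) would be:

    /* Hypothetical synchronous rendering of the bnx2x_vfop_tpa() loop */
    static int tpa_update_all_rxqs_sketch(struct bnx2x *bp,
                                          struct bnx2x_virtf *vf,
                                          dma_addr_t *sge_map)
    {
            struct bnx2x_queue_state_params *q = &vf->op_params.qstate;
            int qid, rc;

            q->cmd = BNX2X_Q_CMD_UPDATE_TPA;
            for (qid = 0; qid < vf_rxq_count(vf); qid++) {
                    q->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
                    /* only the SGE ring address changes between queues */
                    q->params.update_tpa.sge_map = sge_map[qid];
                    rc = bnx2x_queue_state_change(bp, q);
                    if (rc)
                            return rc;
            }
            return 0;
    }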
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index d9fcca1b5a9db2365773c48c4d9a3ad28462d797..9b60e80c89fe9b5aae6bea80fa2aba4e476deb83 100644
@@ -100,6 +100,7 @@ union bnx2x_vfop_params {
        struct bnx2x_mcast_ramrod_params        mcast;
        struct bnx2x_config_rss_params          rss;
        struct bnx2x_vfop_qctor_params          qctor;
+       struct bnx2x_queue_state_params         qstate;
 };
 
 /* forward */
@@ -166,6 +167,11 @@ struct bnx2x_vfop_args_filters {
        atomic_t *credit;       /* non NULL means 'don't consume credit' */
 };
 
+struct bnx2x_vfop_args_tpa {
+       int        qid;
+       dma_addr_t sge_map[PFVF_MAX_QUEUES_PER_VF];
+};
+
 union bnx2x_vfop_args {
        struct bnx2x_vfop_args_mcast    mc_list;
        struct bnx2x_vfop_args_qctor    qctor;
@@ -173,6 +179,7 @@ union bnx2x_vfop_args {
        struct bnx2x_vfop_args_defvlan  defvlan;
        struct bnx2x_vfop_args_qx       qx;
        struct bnx2x_vfop_args_filters  filters;
+       struct bnx2x_vfop_args_tpa      tpa;
 };
 
 struct bnx2x_vfop {
@@ -704,6 +711,11 @@ int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
                       struct bnx2x_virtf *vf,
                       struct bnx2x_vfop_cmd *cmd);
 
+int bnx2x_vfop_tpa_cmd(struct bnx2x *bp,
+                      struct bnx2x_virtf *vf,
+                      struct bnx2x_vfop_cmd *cmd,
+                      struct vfpf_tpa_tlv *tpa_tlv);
+
 /* VF release ~ VF close + VF release-resources
  *
  * Release is the ultimate SW shutdown and is called whenever an
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index ebad48a330e73fb493dc55dcd02dbc5df301453b..dfaed288becd11576fa8abfee42efa898fdf9100 100644
@@ -1159,7 +1159,8 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
        resp->pfdev_info.db_size = bp->db_size;
        resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
        resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
-                                  /* PFVF_CAP_DHC |*/ PFVF_CAP_TPA);
+                                  PFVF_CAP_TPA |
+                                  PFVF_CAP_TPA_UPDATE);
        bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
                          sizeof(resp->pfdev_info.fw_ver));
 
@@ -1910,6 +1911,75 @@ mbx_resp:
                bnx2x_vf_mbx_resp(bp, vf);
 }
 
+static int bnx2x_validate_tpa_params(struct bnx2x *bp,
+                                      struct vfpf_tpa_tlv *tpa_tlv)
+{
+       int rc = 0;
+
+       if (tpa_tlv->tpa_client_info.max_sges_for_packet >
+           U_ETH_MAX_SGES_FOR_PACKET) {
+               rc = -EINVAL;
+               BNX2X_ERR("TPA update: max_sges received %d, max is %d\n",
+                         tpa_tlv->tpa_client_info.max_sges_for_packet,
+                         U_ETH_MAX_SGES_FOR_PACKET);
+       }
+
+       if (tpa_tlv->tpa_client_info.max_tpa_queues > MAX_AGG_QS(bp)) {
+               rc = -EINVAL;
+               BNX2X_ERR("TPA update: max_tpa_queues received %d, max is %d\n",
+                         tpa_tlv->tpa_client_info.max_tpa_queues,
+                         MAX_AGG_QS(bp));
+       }
+
+       return rc;
+}
+
+static void bnx2x_vf_mbx_update_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf,
+                                   struct bnx2x_vf_mbx *mbx)
+{
+       struct bnx2x_vfop_cmd cmd = {
+               .done = bnx2x_vf_mbx_resp,
+               .block = false,
+       };
+       struct bnx2x_queue_update_tpa_params *vf_op_params =
+               &vf->op_params.qstate.params.update_tpa;
+       struct vfpf_tpa_tlv *tpa_tlv = &mbx->msg->req.update_tpa;
+
+       memset(vf_op_params, 0, sizeof(*vf_op_params));
+
+       if (bnx2x_validate_tpa_params(bp, tpa_tlv))
+               goto mbx_resp;
+
+       vf_op_params->complete_on_both_clients =
+               tpa_tlv->tpa_client_info.complete_on_both_clients;
+       vf_op_params->dont_verify_thr =
+               tpa_tlv->tpa_client_info.dont_verify_thr;
+       vf_op_params->max_agg_sz =
+               tpa_tlv->tpa_client_info.max_agg_size;
+       vf_op_params->max_sges_pkt =
+               tpa_tlv->tpa_client_info.max_sges_for_packet;
+       vf_op_params->max_tpa_queues =
+               tpa_tlv->tpa_client_info.max_tpa_queues;
+       vf_op_params->sge_buff_sz =
+               tpa_tlv->tpa_client_info.sge_buff_size;
+       vf_op_params->sge_pause_thr_high =
+               tpa_tlv->tpa_client_info.sge_pause_thr_high;
+       vf_op_params->sge_pause_thr_low =
+               tpa_tlv->tpa_client_info.sge_pause_thr_low;
+       vf_op_params->tpa_mode =
+               tpa_tlv->tpa_client_info.tpa_mode;
+       vf_op_params->update_ipv4 =
+               tpa_tlv->tpa_client_info.update_ipv4;
+       vf_op_params->update_ipv6 =
+               tpa_tlv->tpa_client_info.update_ipv6;
+
+       vf->op_rc = bnx2x_vfop_tpa_cmd(bp, vf, &cmd, tpa_tlv);
+
+mbx_resp:
+       if (vf->op_rc)
+               bnx2x_vf_mbx_resp(bp, vf);
+}
+
 /* dispatch request */
 static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                  struct bnx2x_vf_mbx *mbx)
@@ -1949,6 +2019,9 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
                case CHANNEL_TLV_UPDATE_RSS:
                        bnx2x_vf_mbx_update_rss(bp, vf, mbx);
                        return;
+               case CHANNEL_TLV_UPDATE_TPA:
+                       bnx2x_vf_mbx_update_tpa(bp, vf, mbx);
+                       return;
                }
 
        } else {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
index 208568bc7a71d72d8e19aea70f02485e17c0e8ee..c922b81170e5bc20c4ff69d34d16d40f853c9d8a 100644
@@ -162,6 +162,7 @@ struct pfvf_acquire_resp_tlv {
 #define PFVF_CAP_RSS           0x00000001
 #define PFVF_CAP_DHC           0x00000002
 #define PFVF_CAP_TPA           0x00000004
+#define PFVF_CAP_TPA_UPDATE    0x00000008
                char fw_ver[32];
                u16 db_size;
                u8  indices_per_sb;
@@ -303,6 +304,25 @@ struct vfpf_set_q_filters_tlv {
        u32 rx_mask;    /* see mask constants at the top of the file */
 };
 
+struct vfpf_tpa_tlv {
+       struct vfpf_first_tlv   first_tlv;
+
+       struct vf_pf_tpa_client_info {
+               aligned_u64 sge_addr[PFVF_MAX_QUEUES_PER_VF];
+               u8 update_ipv4;
+               u8 update_ipv6;
+               u8 max_tpa_queues;
+               u8 max_sges_for_packet;
+               u8 complete_on_both_clients;
+               u8 dont_verify_thr;
+               u8 tpa_mode;
+               u16 sge_buff_size;
+               u16 max_agg_size;
+               u16 sge_pause_thr_low;
+               u16 sge_pause_thr_high;
+       } tpa_client_info;
+};
+
 /* close VF (disable VF) */
 struct vfpf_close_tlv {
        struct vfpf_first_tlv   first_tlv;
@@ -331,6 +351,7 @@ union vfpf_tlvs {
        struct vfpf_set_q_filters_tlv   set_q_filters;
        struct vfpf_release_tlv         release;
        struct vfpf_rss_tlv             update_rss;
+       struct vfpf_tpa_tlv             update_tpa;
        struct channel_list_end_tlv     list_end;
        struct tlv_buffer_size          tlv_buf_size;
 };
@@ -405,6 +426,7 @@ enum channel_tlvs {
        CHANNEL_TLV_PF_SET_VLAN,
        CHANNEL_TLV_UPDATE_RSS,
        CHANNEL_TLV_PHYS_PORT_ID,
+       CHANNEL_TLV_UPDATE_TPA,
        CHANNEL_TLV_MAX
 };