/* bnx2x_sriov.c: Broadcom Everest network driver.
 *
 * Copyright 2009-2013 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Shmulik Ravid <shmulikr@broadcom.com>
 *	       Ariel Elior <ariele@broadcom.com>
 *
 */
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sp.h"
#include <linux/crc32.h>
#include <linux/if_vlan.h>

/* General service functions */
static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
				  u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
				 u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}
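
/* Note: the XSTRORM/CSTRORM/TSTRORM/USTRORM prefixes above refer to the four
 * firmware "storm" processors of the Everest pipeline; the VF-to-PF mapping
 * and the function-enable flag are mirrored into the internal memory of each
 * one so that every storm resolves the VF consistently.
 */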

int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	int idx;

	for_each_vf(bp, idx)
		if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
			break;
	return idx;
}

struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
	return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
}

static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
				u8 igu_sb_id, u8 segment, u16 index, u8 op,
				u8 update)
{
	/* acking a VF sb through the PF - use the GRC */
	u32 ctl;
	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
	u32 func_encode = vf->abs_vfid;
	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
	struct igu_regular cmd_data = {0};

	cmd_data.sb_id_and_flags =
			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));

	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
	      func_encode << IGU_CTRL_REG_FID_SHIFT |
	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   cmd_data.sb_id_and_flags, igu_addr_data);
	REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
	mmiowb();
	barrier();

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   ctl, igu_addr_ctl);
	REG_WR(bp, igu_addr_ctl, ctl);
	mmiowb();
	barrier();
}
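
/* Note: the sequence above acks a VF status block from PF context - the PF
 * cannot touch the VF's own IGU view, so the producer-update command is
 * issued indirectly through the IGU command registers (GRC), with the VF
 * encoded in the FID field of the control word.
 */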

/* VFOP - VF slow-path operation support */

#define BNX2X_VFOP_FILTER_ADD_CNT_MAX		0x10000

/* VFOP operations states */
enum bnx2x_vfop_qctor_state {
	BNX2X_VFOP_QCTOR_INIT,
	BNX2X_VFOP_QCTOR_SETUP,
	BNX2X_VFOP_QCTOR_INT_EN
};

enum bnx2x_vfop_qdtor_state {
	BNX2X_VFOP_QDTOR_HALT,
	BNX2X_VFOP_QDTOR_TERMINATE,
	BNX2X_VFOP_QDTOR_CFCDEL,
	BNX2X_VFOP_QDTOR_DONE
};

enum bnx2x_vfop_vlan_mac_state {
	BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
	BNX2X_VFOP_VLAN_MAC_CLEAR,
	BNX2X_VFOP_VLAN_MAC_CHK_DONE,
	BNX2X_VFOP_MAC_CONFIG_LIST,
	BNX2X_VFOP_VLAN_CONFIG_LIST,
	BNX2X_VFOP_VLAN_CONFIG_LIST_0
};

enum bnx2x_vfop_qsetup_state {
	BNX2X_VFOP_QSETUP_CTOR,
	BNX2X_VFOP_QSETUP_VLAN0,
	BNX2X_VFOP_QSETUP_DONE
};

enum bnx2x_vfop_mcast_state {
	BNX2X_VFOP_MCAST_DEL,
	BNX2X_VFOP_MCAST_ADD,
	BNX2X_VFOP_MCAST_CHK_DONE
};

enum bnx2x_vfop_qflr_state {
	BNX2X_VFOP_QFLR_CLR_VLAN,
	BNX2X_VFOP_QFLR_CLR_MAC,
	BNX2X_VFOP_QFLR_TERMINATE,
	BNX2X_VFOP_QFLR_DONE
};

enum bnx2x_vfop_flr_state {
	BNX2X_VFOP_FLR_QUEUES,
	BNX2X_VFOP_FLR_HW
};

enum bnx2x_vfop_close_state {
	BNX2X_VFOP_CLOSE_QUEUES,
	BNX2X_VFOP_CLOSE_HW
};

enum bnx2x_vfop_rxmode_state {
	BNX2X_VFOP_RXMODE_CONFIG,
	BNX2X_VFOP_RXMODE_DONE
};

enum bnx2x_vfop_qteardown_state {
	BNX2X_VFOP_QTEARDOWN_RXMODE,
	BNX2X_VFOP_QTEARDOWN_CLR_VLAN,
	BNX2X_VFOP_QTEARDOWN_CLR_MAC,
	BNX2X_VFOP_QTEARDOWN_QDTOR,
	BNX2X_VFOP_QTEARDOWN_DONE
};

#define bnx2x_vfop_reset_wq(vf)	atomic_set(&vf->op_in_progress, 0)
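
/* Each VFOP below is a small state machine: the *_cmd wrapper allocates a
 * struct bnx2x_vfop, seeds the initial state and a completion callback via
 * bnx2x_vfop_opset(), and kicks execution with bnx2x_vfop_transition(). The
 * state handler is then re-entered on each slow-path completion; inside a
 * handler, bnx2x_vfop_finalize() dispatches to the op_err/op_done/op_pending
 * labels according to the return code and the ramrod's pending status.
 */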

void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
{
	DP(BNX2X_MSG_IOV,
	   "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d\n",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->tx.sb_cq_index,
	   init_params->tx.hc_rate,
	   setup_params->flags,
	   setup_params->txq_params.traffic_type);
}

void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
{
	struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params;

	DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n"
	   "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->rx.sb_cq_index,
	   init_params->rx.hc_rate,
	   setup_params->gen_params.mtu,
	   rxq_params->buf_sz,
	   rxq_params->sge_buf_sz,
	   rxq_params->max_sges_pkt,
	   rxq_params->tpa_agg_sz,
	   setup_params->flags,
	   rxq_params->drop_flags,
	   rxq_params->cache_line_log);
}

void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
			   struct bnx2x_virtf *vf,
			   struct bnx2x_vf_queue *q,
			   struct bnx2x_vfop_qctor_params *p,
			   unsigned long q_type)
{
	struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
	struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;

	/* INIT */

	/* Enable host coalescing in the transition to INIT state */
	if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags);

	if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags);

	/* FW SB ID */
	init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);

	/* context */
	init_p->cxts[0] = q->cxt;

	/* SETUP */

	/* Setup-op general parameters */
	setup_p->gen_params.spcl_id = vf->sp_cl_id;
	setup_p->gen_params.stat_id = vfq_stat_id(vf, q);

	/* Setup-op pause params:
	 * Nothing to do, the pause thresholds are set by default to 0 which
	 * effectively turns off the feature for this queue. We don't want
	 * one queue (VF) interfering with another queue (another VF)
	 */
	if (vf->cfg_flags & VF_CFG_FW_FC)
		BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n",
			  vf->abs_vfid);
	/* Setup-op flags:
	 * collect statistics, zero statistics, local-switching, security,
	 * OV for Flex10, RSS and MCAST for leading
	 */
	if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags))
		__set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags);

	/* for VFs, enable tx switching, bd coherency, and mac address
	 * anti-spoofing
	 */
	__set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
	__set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
	__set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);

	if (vfq_is_leading(q)) {
		__set_bit(BNX2X_Q_FLG_LEADING_RSS, &setup_p->flags);
		__set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
	}

	/* Setup-op rx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
		struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;

		rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
		rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
		rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid);

		if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags))
			rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES;
	}

	/* Setup-op tx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) {
		setup_p->txq_params.tss_leading_cl_id = vf->leading_rss;
		setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	}
}

/* VFOP queue construction */
static void bnx2x_vfop_qctor(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qctor *args = &vfop->args.qctor;
	struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
	enum bnx2x_vfop_qctor_state state = vfop->state;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_QCTOR_INIT:

		/* has this queue already been opened? */
		if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
		    BNX2X_Q_LOGICAL_STATE_ACTIVE) {
			DP(BNX2X_MSG_IOV,
			   "Entered qctor but queue was already up. Aborting gracefully\n");
			goto op_done;
		}

		/* next state */
		vfop->state = BNX2X_VFOP_QCTOR_SETUP;

		q_params->cmd = BNX2X_Q_CMD_INIT;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QCTOR_SETUP:
		/* next state */
		vfop->state = BNX2X_VFOP_QCTOR_INT_EN;

		/* copy pre-prepared setup params to the queue-state params */
		vfop->op_p->qctor.qstate.params.setup =
			vfop->op_p->qctor.prep_qsetup;

		q_params->cmd = BNX2X_Q_CMD_SETUP;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QCTOR_INT_EN:

		/* enable interrupts */
		bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, args->sb_idx),
				    USTORM_ID, 0, IGU_INT_ENABLE, 0);
		goto op_done;
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("QCTOR[%d:%d] error: cmd %d, rc %d\n",
		  vf->abs_vfid, args->qid, q_params->cmd, vfop->rc);
op_done:
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}

static int bnx2x_vfop_qctor_cmd(struct bnx2x *bp,
				struct bnx2x_virtf *vf,
				struct bnx2x_vfop_cmd *cmd,
				int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vf->op_params.qctor.qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);

		vfop->args.qctor.qid = qid;
		vfop->args.qctor.sb_idx = bnx2x_vfq(vf, qid, sb_idx);

		bnx2x_vfop_opset(BNX2X_VFOP_QCTOR_INIT,
				 bnx2x_vfop_qctor, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qctor,
					     cmd->block);
	}
	return -ENOMEM;
}
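
/* Typical usage (a sketch, not taken from this file): a caller that wants to
 * bring up queue 0 synchronously would do something like
 *
 *	struct bnx2x_vfop_cmd cmd = { .done = NULL, .block = true };
 *	rc = bnx2x_vfop_qctor_cmd(bp, vf, &cmd, 0);
 *
 * while callers chaining into a larger state machine set .done to their own
 * handler instead, as bnx2x_vfop_qsetup() does below.
 */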

/* VFOP queue destruction */
static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qdtor *qdtor = &vfop->args.qdtor;
	struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
	enum bnx2x_vfop_qdtor_state state = vfop->state;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_QDTOR_HALT:

		/* has this queue already been stopped? */
		if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
		    BNX2X_Q_LOGICAL_STATE_STOPPED) {
			DP(BNX2X_MSG_IOV,
			   "Entered qdtor but queue was already stopped. Aborting gracefully\n");
			goto op_done;
		}

		/* next state */
		vfop->state = BNX2X_VFOP_QDTOR_TERMINATE;

		q_params->cmd = BNX2X_Q_CMD_HALT;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QDTOR_TERMINATE:
		/* next state */
		vfop->state = BNX2X_VFOP_QDTOR_CFCDEL;

		q_params->cmd = BNX2X_Q_CMD_TERMINATE;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QDTOR_CFCDEL:
		/* next state */
		vfop->state = BNX2X_VFOP_QDTOR_DONE;

		q_params->cmd = BNX2X_Q_CMD_CFC_DEL;
		vfop->rc = bnx2x_queue_state_change(bp, q_params);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
	BNX2X_ERR("QDTOR[%d:%d] error: cmd %d, rc %d\n",
		  vf->abs_vfid, qdtor->qid, q_params->cmd, vfop->rc);
op_done:
	case BNX2X_VFOP_QDTOR_DONE:
		/* invalidate the context */
		qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
		qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}

static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
				struct bnx2x_virtf *vf,
				struct bnx2x_vfop_cmd *cmd,
				int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_queue_state_params *qstate =
			&vf->op_params.qctor.qstate;

		memset(qstate, 0, sizeof(*qstate));
		qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);

		vfop->args.qdtor.qid = qid;
		vfop->args.qdtor.cxt = bnx2x_vfq(vf, qid, cxt);

		bnx2x_vfop_opset(BNX2X_VFOP_QDTOR_HALT,
				 bnx2x_vfop_qdtor, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
					     cmd->block);
	}
	DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop.\n", vf->abs_vfid);
	return -ENOMEM;
}

static void
bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
{
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
	if (vf) {
		if (!vf_sb_count(vf))
			vf->igu_base_id = igu_sb_id;
		++vf_sb_count(vf);
	}
}

/* VFOP MAC/VLAN helpers */
static inline void bnx2x_vfop_credit(struct bnx2x *bp,
				     struct bnx2x_vfop *vfop,
				     struct bnx2x_vlan_mac_obj *obj)
{
	struct bnx2x_vfop_args_filters *args = &vfop->args.filters;

	/* update credit only if there is no error
	 * and a valid credit counter
	 */
	if (!vfop->rc && args->credit) {
		struct list_head *pos;
		int read_lock;
		int cnt = 0;

		read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
		if (read_lock)
			DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n");

		list_for_each(pos, &obj->head)
			cnt++;

		if (!read_lock)
			bnx2x_vlan_mac_h_read_unlock(bp, obj);

		atomic_set(args->credit, cnt);
	}
}

static int bnx2x_vfop_set_user_req(struct bnx2x *bp,
				   struct bnx2x_vfop_filter *pos,
				   struct bnx2x_vlan_mac_data *user_req)
{
	user_req->cmd = pos->add ? BNX2X_VLAN_MAC_ADD :
		BNX2X_VLAN_MAC_DEL;

	switch (pos->type) {
	case BNX2X_VFOP_FILTER_MAC:
		memcpy(user_req->u.mac.mac, pos->mac, ETH_ALEN);
		break;
	case BNX2X_VFOP_FILTER_VLAN:
		user_req->u.vlan.vlan = pos->vid;
		break;
	default:
		BNX2X_ERR("Invalid filter type, skipping\n");
		return 1;
	}
	return 0;
}

static int
bnx2x_vfop_config_vlan0(struct bnx2x *bp,
			struct bnx2x_vlan_mac_ramrod_params *vlan_mac,
			bool add)
{
	int rc;

	vlan_mac->user_req.cmd = add ? BNX2X_VLAN_MAC_ADD :
		BNX2X_VLAN_MAC_DEL;
	vlan_mac->user_req.u.vlan.vlan = 0;

	rc = bnx2x_config_vlan_mac(bp, vlan_mac);
	if (rc == -EEXIST)
		rc = 0;
	return rc;
}

static int bnx2x_vfop_config_list(struct bnx2x *bp,
				  struct bnx2x_vfop_filters *filters,
				  struct bnx2x_vlan_mac_ramrod_params *vlan_mac)
{
	struct bnx2x_vfop_filter *pos, *tmp;
	struct list_head rollback_list, *filters_list = &filters->head;
	struct bnx2x_vlan_mac_data *user_req = &vlan_mac->user_req;
	int rc = 0, cnt = 0;

	INIT_LIST_HEAD(&rollback_list);

	list_for_each_entry_safe(pos, tmp, filters_list, link) {
		if (bnx2x_vfop_set_user_req(bp, pos, user_req))
			continue;

		rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		if (rc >= 0) {
			cnt += pos->add ? 1 : -1;
			list_move(&pos->link, &rollback_list);
			rc = 0;
		} else if (rc == -EEXIST) {
			rc = 0;
		} else {
			BNX2X_ERR("Failed to add a new vlan_mac command\n");
			break;
		}
	}

	/* rollback if error or too many rules added */
	if (rc || cnt > filters->add_cnt) {
		BNX2X_ERR("error or too many rules added. Performing rollback\n");
		list_for_each_entry_safe(pos, tmp, &rollback_list, link) {
			pos->add = !pos->add;	/* reverse op */
			bnx2x_vfop_set_user_req(bp, pos, user_req);
			bnx2x_config_vlan_mac(bp, vlan_mac);
			list_del(&pos->link);
		}
		cnt = 0;
		if (!rc)
			rc = -EINVAL;
	}
	filters->add_cnt = cnt;
	return rc;
}
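
/* Note on the rollback semantics above: every request accepted by
 * bnx2x_config_vlan_mac() is moved onto rollback_list; if a later request
 * fails, or more rules were added than filters->add_cnt allows, each accepted
 * request is replayed with its add/del sense inverted, restoring the object
 * to its original state, and add_cnt then reports 0.
 */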

/* VFOP set VLAN/MAC */
static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vlan_mac_ramrod_params *vlan_mac = &vfop->op_p->vlan_mac;
	struct bnx2x_vlan_mac_obj *obj = vlan_mac->vlan_mac_obj;
	struct bnx2x_vfop_filters *filters = vfop->args.filters.multi_filter;

	enum bnx2x_vfop_vlan_mac_state state = vfop->state;

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	bnx2x_vfop_reset_wq(vf);

	switch (state) {
	case BNX2X_VFOP_VLAN_MAC_CLEAR:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do delete */
		vfop->rc = obj->delete_all(bp, obj,
					   &vlan_mac->user_req.vlan_mac_flags,
					   &vlan_mac->ramrod_flags);

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do config */
		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		if (vfop->rc == -EEXIST)
			vfop->rc = 0;

		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_VLAN_MAC_CHK_DONE:
		vfop->rc = !!obj->raw.check_pending(&obj->raw);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_MAC_CONFIG_LIST:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		/* do list config */
		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
		if (vfop->rc)
			goto op_err;

		set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_VLAN_CONFIG_LIST:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_CONFIG_LIST_0;

		/* remove vlan0 - could be no-op */
		vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, false);
		if (vfop->rc)
			goto op_err;

		/* Do vlan list config. if this operation fails we try to
		 * restore vlan0 to keep the queue in working order
		 */
		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
		if (!vfop->rc) {
			set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
			vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
		}
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); /* fall-through */

	case BNX2X_VFOP_VLAN_CONFIG_LIST_0:
		/* next state */
		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

		if (list_empty(&obj->head))
			/* add vlan0 */
			vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, true);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("VLAN-MAC error: rc %d\n", vfop->rc);
op_done:
	kfree(filters);
	bnx2x_vfop_credit(bp, vfop, obj);
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}

struct bnx2x_vfop_vlan_mac_flags {
	bool drv_only;
	bool dont_consume;
	bool single_cmd;
	bool add;
};

static void
bnx2x_vfop_vlan_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
				struct bnx2x_vfop_vlan_mac_flags *flags)
{
	struct bnx2x_vlan_mac_data *ureq = &ramrod->user_req;

	memset(ramrod, 0, sizeof(*ramrod));

	/* ramrod flags */
	if (flags->drv_only)
		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod->ramrod_flags);
	if (flags->single_cmd)
		set_bit(RAMROD_EXEC, &ramrod->ramrod_flags);

	/* mac_vlan flags */
	if (flags->dont_consume)
		set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, &ureq->vlan_mac_flags);

	/* cmd */
	ureq->cmd = flags->add ? BNX2X_VLAN_MAC_ADD : BNX2X_VLAN_MAC_DEL;
}

static inline void
bnx2x_vfop_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
			   struct bnx2x_vfop_vlan_mac_flags *flags)
{
	bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, flags);
	set_bit(BNX2X_ETH_MAC, &ramrod->user_req.vlan_mac_flags);
}

static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
				     struct bnx2x_virtf *vf,
				     struct bnx2x_vfop_cmd *cmd,
				     int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = NULL,	/* single */
			.credit = NULL,		/* consume credit */
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = true,
			.add = false /* don't care */,
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);

		/* set extra args */
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
			    struct bnx2x_virtf *vf,
			    struct bnx2x_vfop_cmd *cmd,
			    struct bnx2x_vfop_filters *macs,
			    int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = macs,
			.credit = NULL,		/* consume credit */
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = false,
			.add = false, /* don't care since only the items in the
				       * filters list affect the sp operation,
				       * not the list itself
				       */
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);

		/* set extra args */
		filters.multi_filter->add_cnt = BNX2X_VFOP_FILTER_ADD_CNT_MAX;
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_MAC_CONFIG_LIST,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
			    struct bnx2x_virtf *vf,
			    struct bnx2x_vfop_cmd *cmd,
			    int qid, u16 vid, bool add)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = NULL, /* single command */
			.credit = &bnx2x_vfq(vf, qid, vlan_count),
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = false,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = true,
			.add = add,
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
		ramrod->user_req.u.vlan.vlan = vid;

		/* set object */
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

		/* set extra args */
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
				      struct bnx2x_virtf *vf,
				      struct bnx2x_vfop_cmd *cmd,
				      int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = NULL, /* single command */
			.credit = &bnx2x_vfq(vf, qid, vlan_count),
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = true,
			.add = false, /* don't care */
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

		/* set extra args */
		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CLEAR,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}

int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
			     struct bnx2x_virtf *vf,
			     struct bnx2x_vfop_cmd *cmd,
			     struct bnx2x_vfop_filters *vlans,
			     int qid, bool drv_only)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_vfop_args_filters filters = {
			.multi_filter = vlans,
			.credit = &bnx2x_vfq(vf, qid, vlan_count),
		};
		struct bnx2x_vfop_vlan_mac_flags flags = {
			.drv_only = drv_only,
			.dont_consume = (filters.credit != NULL),
			.single_cmd = false,
			.add = false, /* don't care */
		};
		struct bnx2x_vlan_mac_ramrod_params *ramrod =
			&vf->op_params.vlan_mac;

		/* set ramrod params */
		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);

		/* set object */
		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);

		/* set extra args */
		filters.multi_filter->add_cnt = vf_vlan_rules_cnt(vf) -
			atomic_read(filters.credit);

		vfop->args.filters = filters;

		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_CONFIG_LIST,
				 bnx2x_vfop_vlan_mac, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
					     cmd->block);
	}
	return -ENOMEM;
}
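
/* Note: the add budget for a VLAN list is the VF's static VLAN rule quota
 * minus the rules already consumed - the credit counter tracks the current
 * list length (see bnx2x_vfop_credit()) - and bnx2x_vfop_config_list() rolls
 * the whole list back if that budget is exceeded.
 */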

/* VFOP queue setup (queue constructor + set vlan 0) */
static void bnx2x_vfop_qsetup(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	int qid = vfop->args.qctor.qid;
	enum bnx2x_vfop_qsetup_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_qsetup,
		.block = false,
	};

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_QSETUP_CTOR:
		/* init the queue ctor command */
		vfop->state = BNX2X_VFOP_QSETUP_VLAN0;
		vfop->rc = bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_QSETUP_VLAN0:
		/* skip if non-leading or FPGA/EMU */
		if (qid || CHIP_REV_IS_FPGA(bp) || CHIP_REV_IS_EMUL(bp))
			goto op_done;

		/* init the queue set-vlan command (for vlan 0) */
		vfop->state = BNX2X_VFOP_QSETUP_DONE;
		vfop->rc = bnx2x_vfop_vlan_set_cmd(bp, vf, &cmd, qid, 0, true);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
op_err:
	BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc);
op_done:
	case BNX2X_VFOP_QSETUP_DONE:
		vf->cfg_flags |= VF_CFG_VLAN;
		smp_mb__before_clear_bit();
		set_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
			&bp->sp_rtnl_state);
		smp_mb__after_clear_bit();
		schedule_delayed_work(&bp->sp_rtnl_task, 0);
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
}

int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
			  struct bnx2x_virtf *vf,
			  struct bnx2x_vfop_cmd *cmd,
			  int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vfop->args.qctor.qid = qid;

		bnx2x_vfop_opset(BNX2X_VFOP_QSETUP_CTOR,
				 bnx2x_vfop_qsetup, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qsetup,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VFOP queue FLR handling (clear vlans, clear macs, queue destructor) */
static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	int qid = vfop->args.qx.qid;
	enum bnx2x_vfop_qflr_state state = vfop->state;
	struct bnx2x_queue_state_params *qstate;
	struct bnx2x_vfop_cmd cmd;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "VF[%d] STATE: %d\n", vf->abs_vfid, state);

	cmd.done = bnx2x_vfop_qflr;
	cmd.block = false;

	switch (state) {
	case BNX2X_VFOP_QFLR_CLR_VLAN:
		/* vlan-clear-all: driver-only, don't consume credit */
		vfop->state = BNX2X_VFOP_QFLR_CLR_MAC;
		vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, true);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QFLR_CLR_MAC:
		/* mac-clear-all: driver-only, consume credit */
		vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
		vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, true);
		DP(BNX2X_MSG_IOV,
		   "VF[%d] vfop->rc after bnx2x_vfop_mac_delall_cmd was %d\n",
		   vf->abs_vfid, vfop->rc);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QFLR_TERMINATE:
		qstate = &vfop->op_p->qctor.qstate;
		memset(qstate, 0, sizeof(*qstate));
		qstate->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
		vfop->state = BNX2X_VFOP_QFLR_DONE;

		DP(BNX2X_MSG_IOV, "VF[%d] qstate during flr was %d\n",
		   vf->abs_vfid, qstate->q_obj->state);

		if (qstate->q_obj->state != BNX2X_Q_STATE_RESET) {
			qstate->q_obj->state = BNX2X_Q_STATE_STOPPED;
			qstate->cmd = BNX2X_Q_CMD_TERMINATE;
			vfop->rc = bnx2x_queue_state_change(bp, qstate);
			bnx2x_vfop_finalize(vf, vfop->rc, VFOP_VERIFY_PEND);
		} else {
			goto op_done;
		}

op_err:
	BNX2X_ERR("QFLR[%d:%d] error: rc %d\n",
		  vf->abs_vfid, qid, vfop->rc);
op_done:
	case BNX2X_VFOP_QFLR_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}
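
/* Note: the FLR flavour of queue teardown clears vlans/macs with
 * drv_only == true (RAMROD_DRV_CLR_ONLY), i.e. only the driver's bookkeeping
 * is updated - the device side state is presumed already lost to the FLR -
 * and the queue object is forced straight through TERMINATE instead of the
 * full HALT/TERMINATE/CFC_DEL sequence used by bnx2x_vfop_qdtor().
 */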

static int bnx2x_vfop_qflr_cmd(struct bnx2x *bp,
			       struct bnx2x_virtf *vf,
			       struct bnx2x_vfop_cmd *cmd,
			       int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vfop->args.qx.qid = qid;
		bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN,
				 bnx2x_vfop_qflr, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qflr,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VFOP multi-casts */
static void bnx2x_vfop_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_mcast_ramrod_params *mcast = &vfop->op_p->mcast;
	struct bnx2x_raw_obj *raw = &mcast->mcast_obj->raw;
	struct bnx2x_vfop_args_mcast *args = &vfop->args.mc_list;
	enum bnx2x_vfop_mcast_state state = vfop->state;
	int i;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_MCAST_DEL:
		/* clear existing mcasts */
		vfop->state = BNX2X_VFOP_MCAST_ADD;
		vfop->rc = bnx2x_config_mcast(bp, mcast, BNX2X_MCAST_CMD_DEL);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);

	case BNX2X_VFOP_MCAST_ADD:
		if (raw->check_pending(raw))
			goto op_pending;

		if (args->mc_num) {
			/* update mcast list on the ramrod params */
			INIT_LIST_HEAD(&mcast->mcast_list);
			for (i = 0; i < args->mc_num; i++)
				list_add_tail(&(args->mc[i].link),
					      &mcast->mcast_list);
			/* add new mcasts */
			vfop->state = BNX2X_VFOP_MCAST_CHK_DONE;
			vfop->rc = bnx2x_config_mcast(bp, mcast,
						      BNX2X_MCAST_CMD_ADD);
		}
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

	case BNX2X_VFOP_MCAST_CHK_DONE:
		vfop->rc = raw->check_pending(raw) ? 1 : 0;
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("MCAST CONFIG error: rc %d\n", vfop->rc);
op_done:
	kfree(args->mc);
	bnx2x_vfop_end(bp, vf, vfop);
op_pending:
	return;
}

int bnx2x_vfop_mcast_cmd(struct bnx2x *bp,
			 struct bnx2x_virtf *vf,
			 struct bnx2x_vfop_cmd *cmd,
			 bnx2x_mac_addr_t *mcasts,
			 int mcast_num, bool drv_only)
{
	struct bnx2x_vfop *vfop = NULL;
	size_t mc_sz = mcast_num * sizeof(struct bnx2x_mcast_list_elem);
	struct bnx2x_mcast_list_elem *mc = mc_sz ? kzalloc(mc_sz, GFP_KERNEL) :
					   NULL;

	if (!mc_sz || mc) {
		vfop = bnx2x_vfop_add(bp, vf);
		if (vfop) {
			int i;
			struct bnx2x_mcast_ramrod_params *ramrod =
				&vf->op_params.mcast;

			/* set ramrod params */
			memset(ramrod, 0, sizeof(*ramrod));
			ramrod->mcast_obj = &vf->mcast_obj;
			if (drv_only)
				set_bit(RAMROD_DRV_CLR_ONLY,
					&ramrod->ramrod_flags);

			/* copy mcasts pointers */
			vfop->args.mc_list.mc_num = mcast_num;
			vfop->args.mc_list.mc = mc;
			for (i = 0; i < mcast_num; i++)
				mc[i].mac = mcasts[i];

			bnx2x_vfop_opset(BNX2X_VFOP_MCAST_DEL,
					 bnx2x_vfop_mcast, cmd->done);
			return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_mcast,
						     cmd->block);
		}
		kfree(mc);
	}
	return -ENOMEM;
}
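
/* Note: once the transition is kicked, the mc element array allocated above
 * is owned by the mcast state machine; it is freed in the op_done path of
 * bnx2x_vfop_mcast(), not by the caller.
 */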

/* VFOP rx-mode */
static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_rx_mode_ramrod_params *ramrod = &vfop->op_p->rx_mode;
	enum bnx2x_vfop_rxmode_state state = vfop->state;

	bnx2x_vfop_reset_wq(vf);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_RXMODE_CONFIG:
		/* next state */
		vfop->state = BNX2X_VFOP_RXMODE_DONE;

		vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
		BNX2X_ERR("RXMODE error: rc %d\n", vfop->rc);
op_done:
	case BNX2X_VFOP_RXMODE_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
op_pending:
	return;
}

int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
			  struct bnx2x_virtf *vf,
			  struct bnx2x_vfop_cmd *cmd,
			  int qid, unsigned long accept_flags)
{
	struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		struct bnx2x_rx_mode_ramrod_params *ramrod =
			&vf->op_params.rx_mode;

		memset(ramrod, 0, sizeof(*ramrod));

		/* Prepare ramrod parameters */
		ramrod->cid = vfq->cid;
		ramrod->cl_id = vfq_cl_id(vf, vfq);
		ramrod->rx_mode_obj = &bp->rx_mode_obj;
		ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);

		ramrod->rx_accept_flags = accept_flags;
		ramrod->tx_accept_flags = accept_flags;
		ramrod->pstate = &vf->filter_state;
		ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;

		set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
		set_bit(RAMROD_RX, &ramrod->ramrod_flags);
		set_bit(RAMROD_TX, &ramrod->ramrod_flags);

		ramrod->rdata =
			bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
		ramrod->rdata_mapping =
			bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);

		bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
				 bnx2x_vfop_rxmode, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rxmode,
					     cmd->block);
	}
	return -ENOMEM;
}

/* VFOP queue tear-down ('drop all' rx-mode, clear vlans, clear macs,
 * queue destructor)
 */
static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	int qid = vfop->args.qx.qid;
	enum bnx2x_vfop_qteardown_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd;

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	cmd.done = bnx2x_vfop_qdown;
	cmd.block = false;

	switch (state) {
	case BNX2X_VFOP_QTEARDOWN_RXMODE:
		/* Drop all */
		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN;
		vfop->rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, qid, 0);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_CLR_VLAN:
		/* vlan-clear-all: don't consume credit */
		vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_MAC;
		vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, false);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_CLR_MAC:
		/* mac-clear-all: consume credit */
		vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
		vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, false);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_QTEARDOWN_QDTOR:
		/* run the queue destruction flow */
		DP(BNX2X_MSG_IOV, "case: BNX2X_VFOP_QTEARDOWN_QDTOR\n");
		vfop->state = BNX2X_VFOP_QTEARDOWN_DONE;
		DP(BNX2X_MSG_IOV, "new state: BNX2X_VFOP_QTEARDOWN_DONE\n");
		vfop->rc = bnx2x_vfop_qdtor_cmd(bp, vf, &cmd, qid);
		DP(BNX2X_MSG_IOV, "returned from cmd\n");
		if (vfop->rc)
			goto op_err;
		return;
op_err:
	BNX2X_ERR("QTEARDOWN[%d:%d] error: rc %d\n",
		  vf->abs_vfid, qid, vfop->rc);

	case BNX2X_VFOP_QTEARDOWN_DONE:
		bnx2x_vfop_end(bp, vf, vfop);
		return;
	default:
		bnx2x_vfop_default(state);
	}
}

int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
			 struct bnx2x_virtf *vf,
			 struct bnx2x_vfop_cmd *cmd,
			 int qid)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);

	if (vfop) {
		vfop->args.qx.qid = qid;
		bnx2x_vfop_opset(BNX2X_VFOP_QTEARDOWN_RXMODE,
				 bnx2x_vfop_qdown, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown,
					     cmd->block);
	}
	DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop\n", vf->abs_vfid);
	return -ENOMEM;
}

/* VF enable primitives
 * when pretend is required the caller is responsible
 * for calling pretend prior to calling these routines
 */

/* internal vf enable - until vf is enabled internally all transactions
 * are blocked. This routine should always be called last with pretend.
 */
static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
{
	REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
}

/* clears vf error in all semi blocks */
static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
}

static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
	u32 was_err_reg = 0;

	switch (was_err_group) {
	case 0:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
		break;
	case 1:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
		break;
	case 2:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
		break;
	case 3:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
		break;
	}
	REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
}
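
/* Note on the arithmetic above: each PGLUE_B "was error" register holds one
 * bit per VF for a group of 32 VFs, so (2 * path + abs_vfid) >> 5 selects the
 * register and the low five bits of the VF id select the bit written to the
 * corresponding *_CLR register.
 */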

static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i;
	u32 val;

	/* Set VF masks and configuration - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));

	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);

	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
	val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
	if (vf->cfg_flags & VF_CFG_INT_SIMD)
		val |= IGU_VF_CONF_SINGLE_ISR_EN;
	val &= ~IGU_VF_CONF_PARENT_MASK;
	val |= BP_FUNC(bp) << IGU_VF_CONF_PARENT_SHIFT;	/* parent PF */
	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);

	DP(BNX2X_MSG_IOV,
	   "value in IGU_REG_VF_CONFIGURATION of vf %d after write %x\n",
	   vf->abs_vfid, REG_RD(bp, IGU_REG_VF_CONFIGURATION));

	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf_sb_count(vf); i++) {
		u8 igu_sb_id = vf_igu_sb(vf, i);

		/* zero prod memory */
		REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);

		/* clear sb state machine */
		bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
				       false /* VF */);

		/* disable + update */
		bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
				    IGU_INT_DISABLE, 1);
	}
}

void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
{
	/* set the VF-PF association in the FW */
	storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
	storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);

	/* clear vf errors */
	bnx2x_vf_semi_clear_err(bp, abs_vfid);
	bnx2x_vf_pglue_clear_err(bp, abs_vfid);

	/* internal vf-enable - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
	DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
	bnx2x_vf_enable_internal(bp, true);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	/* Reset the VF in the IGU - interrupts are still disabled */
	bnx2x_vf_igu_reset(bp, vf);

	/* pretend to enable the vf with the PBF */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	REG_WR(bp, PBF_REG_DISABLE_VF, 0);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
{
	struct pci_dev *dev;
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf)
		return false;

	dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
	if (dev)
		return bnx2x_is_pcie_pending(dev);
	return false;
}

int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
{
	/* Verify no pending pci transactions */
	if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
		BNX2X_ERR("PCIE Transactions still pending\n");

	return 0;
}

/* must be called after the number of PF queues and the number of VFs are
 * both known
 */
static void
bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc)
{
	u16 vlan_count = 0;

	/* will be set only during VF-ACQUIRE */
	resc->num_rxqs = 0;
	resc->num_txqs = 0;

	/* no credit calculations for macs (just yet) */
	resc->num_mac_filters = 1;

	/* divvy up vlan rules */
	vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
	vlan_count = 1 << ilog2(vlan_count);
	resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp);

	/* no real limitation */
	resc->num_mc_filters = 0;

	/* num_sbs already set */
}

static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	/* reset the state variables */
	bnx2x_iov_static_resc(bp, &vf->alloc_resc);
	vf->state = VF_FREE;
}

static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);

	/* DQ usage counter */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT,
					"DQ VF usage counter timed out",
					poll_cnt);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* FW cleanup command - poll for the results */
	if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid),
				   poll_cnt))
		BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid);

	/* verify TX hw is flushed */
	bnx2x_tx_hw_flushed(bp, poll_cnt);
}

static void bnx2x_vfop_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
	enum bnx2x_vfop_flr_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_flr,
		.block = false,
	};

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_FLR_QUEUES:
		/* the cleanup operations are valid if and only if the VF
		 * was first acquired.
		 */
		if (++(qx->qid) < vf_rxq_count(vf)) {
			vfop->rc = bnx2x_vfop_qflr_cmd(bp, vf, &cmd,
						       qx->qid);
			if (vfop->rc)
				goto op_err;
			return;
		}
		/* remove multicasts */
		vfop->state = BNX2X_VFOP_FLR_HW;
		vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL,
						0, true);
		if (vfop->rc)
			goto op_err;
		return;
	case BNX2X_VFOP_FLR_HW:

		/* dispatch final cleanup and wait for HW queues to flush */
		bnx2x_vf_flr_clnup_hw(bp, vf);

		/* release VF resources */
		bnx2x_vf_free_resc(bp, vf);

		/* re-open the mailbox */
		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);

		goto op_done;
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("VF[%d] FLR error: rc %d\n", vf->abs_vfid, vfop->rc);
op_done:
	vf->flr_clnup_stage = VF_FLR_ACK;
	bnx2x_vfop_end(bp, vf, vfop);
	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
}

static int bnx2x_vfop_flr_cmd(struct bnx2x *bp,
			      struct bnx2x_virtf *vf,
			      vfop_handler_t done)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	if (vfop) {
		vfop->args.qx.qid = -1; /* loop */
		bnx2x_vfop_opset(BNX2X_VFOP_FLR_QUEUES,
				 bnx2x_vfop_flr, done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_flr, false);
	}
	return -ENOMEM;
}
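
/* Note: qid starts at -1 because the BNX2X_VFOP_FLR_QUEUES state
 * pre-increments it on every pass, visiting queues 0..vf_rxq_count(vf)-1
 * before moving on to the multicast cleanup and the HW flush phase.
 */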

static void bnx2x_vf_flr_clnup(struct bnx2x *bp, struct bnx2x_virtf *prev_vf)
{
	int i = prev_vf ? prev_vf->index + 1 : 0;
	struct bnx2x_virtf *vf;

	/* find next VF to cleanup */
next_vf_to_clean:
	for (;
	     i < BNX2X_NR_VIRTFN(bp) &&
	     (bnx2x_vf(bp, i, state) != VF_RESET ||
	      bnx2x_vf(bp, i, flr_clnup_stage) != VF_FLR_CLN);
	     i++)
		;

	DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n", i,
	   BNX2X_NR_VIRTFN(bp));

	if (i < BNX2X_NR_VIRTFN(bp)) {
		vf = BP_VF(bp, i);

		/* lock the vf pf channel */
		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);

		/* invoke the VF FLR SM */
		if (bnx2x_vfop_flr_cmd(bp, vf, bnx2x_vf_flr_clnup)) {
			BNX2X_ERR("VF[%d]: FLR cleanup failed -ENOMEM\n",
				  vf->abs_vfid);

			/* mark the VF to be ACKED and continue */
			vf->flr_clnup_stage = VF_FLR_ACK;
			goto next_vf_to_clean;
		}
		return;
	}

	/* we are done, update vf records */
	for_each_vf(bp, i) {
		vf = BP_VF(bp, i);

		if (vf->flr_clnup_stage != VF_FLR_ACK)
			continue;

		vf->flr_clnup_stage = VF_FLR_EPILOG;
	}

	/* Acknowledge the handled VFs.
	 * We acknowledge all the VFs for which an FLR was requested, even
	 * those we never opened, since the MCP will interrupt us immediately
	 * again if we only ack some of the bits, resulting in an endless
	 * loop. This can happen for example in KVM, where an 'all ones' FLR
	 * request is sometimes issued by the hypervisor.
	 */
	DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i],
			  bp->vfdb->flrd_vfs[i]);

	bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0);

	/* clear the acked bits - better yet if the MCP implemented
	 * write to clear semantics
	 */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0);
}

void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
{
	int i;

	/* Read FLR'd VFs */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]);

	DP(BNX2X_MSG_MCP,
	   "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);

	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);
		u32 reset = 0;

		if (vf->abs_vfid < 32)
			reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid);
		else
			reset = bp->vfdb->flrd_vfs[1] &
				(1 << (vf->abs_vfid - 32));

		if (reset) {
			/* set as reset and ready for cleanup */
			vf->state = VF_RESET;
			vf->flr_clnup_stage = VF_FLR_CLN;

			DP(BNX2X_MSG_IOV,
			   "Initiating Final cleanup for VF %d\n",
			   vf->abs_vfid);
		}
	}

	/* do the FLR cleanup for all marked VFs */
	bnx2x_vf_flr_clnup(bp, NULL);
}

/* IOV global initialization routines */
void bnx2x_iov_init_dq(struct bnx2x *bp)
{
	if (!IS_SRIOV(bp))
		return;

	/* Set the DQ such that the CID reflect the abs_vfid */
	REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
	REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));

	/* Set VFs starting CID. If it is > 0, the preceding CIDs belong to
	 * the PF L2 queues
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);

	/* The VF window size is the log2 of the max number of CIDs per VF */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);

	/* The VF doorbell size: 0 - *B, 4 - 128B. We set it here to match
	 * the PF doorbell size, although the two are independent.
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST,
	       BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT);

	/* No security checks for now -
	 * configure single rule (out of 16) mask = 0x1, value = 0x0,
	 * CID range 0 - 0x1ffff
	 */
	REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
	REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);

	/* set the number of VF allowed doorbells to the full DQ range */
	REG_WR(bp, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000);

	/* set the VF doorbell threshold */
	REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
}
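
/* Note: with VF_NORM_VF_BASE == 0 and the CID base/window programmed above,
 * VF doorbells map onto the CID range starting at BNX2X_FIRST_VF_CID, each
 * VF owning a contiguous window of 2^BNX2X_VF_CID_WND CIDs; the EQ handler
 * relies on this layout to decode vf/queue from a CID (see
 * bnx2x_iov_eq_sp_event() below).
 */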

void bnx2x_iov_init_dmae(struct bnx2x *bp)
{
	DP(BNX2X_MSG_IOV, "SRIOV is %s\n", IS_SRIOV(bp) ? "ON" : "OFF");
	if (!IS_SRIOV(bp))
		return;

	REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
}

static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return dev->bus->number + ((dev->devfn + iov->offset +
				    iov->stride * vfid) >> 8);
}

static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
}
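
/* Note: bus/devfn above follow the SR-IOV routing-ID formula: a VF's routing
 * ID is the PF's devfn plus First VF Offset plus VF Stride * vfid, with any
 * carry out of the 8-bit devfn spilling into the bus number.
 */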

static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i, n;
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
		u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
		u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);

		size /= iov->total;
		vf->bars[n].bar = start + size * vf->abs_vfid;
		vf->bars[n].size = size;
	}
}

static int bnx2x_ari_enabled(struct pci_dev *dev)
{
	return dev->bus->self && dev->bus->self->ari_enabled;
}

static void
bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
{
	int sb_id;
	u32 val;
	u8 fid;

	/* IGU in normal mode - read CAM */
	for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
			continue;
		fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
		if (!(fid & IGU_FID_ENCODE_IS_PF))
			bnx2x_vf_set_igu_info(bp, sb_id,
					      (fid & IGU_FID_VF_NUM_MASK));

		DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
		   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
		   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
		   (fid & IGU_FID_VF_NUM_MASK)), sb_id,
		   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
	}
}

static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
{
	if (bp->vfdb) {
		kfree(bp->vfdb->vfqs);
		kfree(bp->vfdb->vfs);
		kfree(bp->vfdb);
	}
	bp->vfdb = NULL;
}

static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	int pos;
	struct pci_dev *dev = bp->pdev;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		BNX2X_ERR("failed to find SRIOV capability in device\n");
		return -ENODEV;
	}

	iov->pos = pos;
	DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
	pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	return 0;
}

static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	u32 val;

	/* read the SRIOV capability structure
	 * The fields can be read via configuration read or
	 * directly from the device (starting at offset PCICFG_OFFSET)
	 */
	if (bnx2x_sriov_pci_cfg_info(bp, iov))
		return -ENODEV;

	/* get the number of SRIOV bars */
	iov->nres = 0;

	/* read the first_vfid */
	val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
	iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
			       * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));

	DP(BNX2X_MSG_IOV,
	   "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
	   BP_FUNC(bp),
	   iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
	   iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	return 0;
}

static u8 bnx2x_iov_get_max_queue_count(struct bnx2x *bp)
{
	int i;
	u8 queue_count = 0;

	if (IS_SRIOV(bp))
		for_each_vf(bp, i)
			queue_count += bnx2x_vf(bp, i, alloc_resc.num_sbs);

	return queue_count;
}

/* must be called after PF bars are mapped */
int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
		       int num_vfs_param)
{
	int err, i, qcount;
	struct bnx2x_sriov *iov;
	struct pci_dev *dev = bp->pdev;

	bp->vfdb = NULL;

	/* verify is pf */
	if (IS_VF(bp))
		return 0;

	/* verify sriov capability is present in configuration space */
	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV))
		return 0;

	/* verify chip revision */
	if (CHIP_IS_E1x(bp))
		return 0;

	/* check if SRIOV support is turned off */
	if (!num_vfs_param)
		return 0;

	/* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
	if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
		BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
			  BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
		return 0;
	}

	/* SRIOV can be enabled only with MSIX */
	if (int_mode_param == BNX2X_INT_MODE_MSI ||
	    int_mode_param == BNX2X_INT_MODE_INTX) {
		BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
		return 0;
	}

	err = -EIO;
	/* verify ari is enabled */
	if (!bnx2x_ari_enabled(bp->pdev)) {
		BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV can not be enabled\n");
		return 0;
	}

	/* verify igu is in normal mode */
	if (CHIP_INT_MODE_IS_BC(bp)) {
		BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n");
		return 0;
	}

	/* allocate the vfs database */
	bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
	if (!bp->vfdb) {
		BNX2X_ERR("failed to allocate vf database\n");
		err = -ENOMEM;
		goto failed;
	}

	/* get the sriov info - Linux already collected all the pertinent
	 * information, however the sriov structure is for the private use
	 * of the pci module. Also we want this information regardless
	 * of the hypervisor.
	 */
	iov = &(bp->vfdb->sriov);
	err = bnx2x_sriov_info(bp, iov);
	if (err)
		goto failed;

	/* SR-IOV capability was enabled but there are no VFs */
	if (iov->total == 0)
		goto failed;

	iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param);

	DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n",
	   num_vfs_param, iov->nr_virtfn);

	/* allocate the vf array */
	bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
				BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
	if (!bp->vfdb->vfs) {
		BNX2X_ERR("failed to allocate vf array\n");
		err = -ENOMEM;
		goto failed;
	}

	/* Initial VF init - index and abs_vfid - nr_virtfn must be set */
	for_each_vf(bp, i) {
		bnx2x_vf(bp, i, index) = i;
		bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
		bnx2x_vf(bp, i, state) = VF_FREE;
		INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head));
		mutex_init(&bnx2x_vf(bp, i, op_mutex));
		bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
	}

	/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
	bnx2x_get_vf_igu_cam_info(bp);

	/* get the total queue count and allocate the global queue arrays */
	qcount = bnx2x_iov_get_max_queue_count(bp);

	/* allocate the queue arrays for all VFs */
	bp->vfdb->vfqs = kzalloc(qcount * sizeof(struct bnx2x_vf_queue),
				 GFP_KERNEL);
	if (!bp->vfdb->vfqs) {
		BNX2X_ERR("failed to allocate vf queue array\n");
		err = -ENOMEM;
		goto failed;
	}

	return 0;
failed:
	DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
	__bnx2x_iov_free_vfdb(bp);
	return err;
}

void bnx2x_iov_remove_one(struct bnx2x *bp)
{
	/* if SRIOV is not enabled there's nothing to do */
	if (!IS_SRIOV(bp))
		return;

	DP(BNX2X_MSG_IOV, "about to call disable sriov\n");
	pci_disable_sriov(bp->pdev);
	DP(BNX2X_MSG_IOV, "sriov disabled\n");

	/* free vf database */
	__bnx2x_iov_free_vfdb(bp);
}

void bnx2x_iov_free_mem(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return;

	/* free vfs hw contexts */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = &bp->vfdb->context[i];
		BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
	}

	BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
		       BP_VFDB(bp)->sp_dma.mapping,
		       BP_VFDB(bp)->sp_dma.size);

	BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
		       BP_VF_MBX_DMA(bp)->mapping,
		       BP_VF_MBX_DMA(bp)->size);

	BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr,
		       BP_VF_BULLETIN_DMA(bp)->mapping,
		       BP_VF_BULLETIN_DMA(bp)->size);
}

int bnx2x_iov_alloc_mem(struct bnx2x *bp)
{
	int tot_size;
	int i;

	if (!IS_SRIOV(bp))
		return 0;

	/* allocate vfs hw contexts */
	tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
		BNX2X_CIDS_PER_VF * sizeof(union cdu_context);

	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
		cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);

		if (cxt->size) {
			BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size);
		} else {
			cxt->addr = NULL;
			cxt->mapping = 0;
		}
		tot_size -= cxt->size;
	}

	/* allocate vfs ramrods dma memory - client_init and set_mac */
	tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
	BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping,
			tot_size);
	BP_VFDB(bp)->sp_dma.size = tot_size;

	/* allocate mailboxes */
	tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
	BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping,
			tot_size);
	BP_VF_MBX_DMA(bp)->size = tot_size;

	/* allocate local bulletin boards */
	tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE;
	BNX2X_PCI_ALLOC(BP_VF_BULLETIN_DMA(bp)->addr,
			&BP_VF_BULLETIN_DMA(bp)->mapping, tot_size);
	BP_VF_BULLETIN_DMA(bp)->size = tot_size;

	return 0;

alloc_mem_err:
	return -ENOMEM;
}
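
/* Note: the context memory is carved into CDU_ILT_PAGE_SZ chunks because each
 * chunk must map 1:1 onto an ILT line; bnx2x_iov_init_ilt() below points the
 * VF ILT lines at these pages.
 */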

static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
			   struct bnx2x_vf_queue *q)
{
	u8 cl_id = vfq_cl_id(vf, q);
	u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
	unsigned long q_type = 0;

	set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
	set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);

	/* Queue State object */
	bnx2x_init_queue_obj(bp, &q->sp_obj,
			     cl_id, &q->cid, 1, func_id,
			     bnx2x_vf_sp(bp, vf, q_data),
			     bnx2x_vf_sp_map(bp, vf, q_data),
			     q_type);

	DP(BNX2X_MSG_IOV,
	   "initialized vf %d's queue object. func id set to %d\n",
	   vf->abs_vfid, q->sp_obj.func_id);

	/* mac/vlan objects are per queue, but only those
	 * that belong to the leading queue are initialized
	 */
	if (vfq_is_leading(q)) {
		/* mac */
		bnx2x_init_mac_obj(bp, &q->mac_obj,
				   cl_id, q->cid, func_id,
				   bnx2x_vf_sp(bp, vf, mac_rdata),
				   bnx2x_vf_sp_map(bp, vf, mac_rdata),
				   BNX2X_FILTER_MAC_PENDING,
				   &vf->filter_state,
				   BNX2X_OBJ_TYPE_RX_TX,
				   &bp->macs_pool);
		/* vlan */
		bnx2x_init_vlan_obj(bp, &q->vlan_obj,
				    cl_id, q->cid, func_id,
				    bnx2x_vf_sp(bp, vf, vlan_rdata),
				    bnx2x_vf_sp_map(bp, vf, vlan_rdata),
				    BNX2X_FILTER_VLAN_PENDING,
				    &vf->filter_state,
				    BNX2X_OBJ_TYPE_RX_TX,
				    &bp->vlans_pool);

		/* mcast */
		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
				     q->cid, func_id, func_id,
				     bnx2x_vf_sp(bp, vf, mcast_rdata),
				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
				     BNX2X_FILTER_MCAST_PENDING,
				     &vf->filter_state,
				     BNX2X_OBJ_TYPE_RX_TX);

		vf->leading_rss = cl_id;
	}
}

/* called by bnx2x_nic_load */
int bnx2x_iov_nic_init(struct bnx2x *bp)
{
	int vfid, qcount, i;

	if (!IS_SRIOV(bp)) {
		DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
		return 0;
	}

	DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);

	/* let FLR complete ... */
	msleep(100);

	/* initialize vf database */
	for_each_vf(bp, vfid) {
		struct bnx2x_virtf *vf = BP_VF(bp, vfid);

		int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
			BNX2X_CIDS_PER_VF;

		union cdu_context *base_cxt = (union cdu_context *)
			BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
			(base_vf_cid & (ILT_PAGE_CIDS-1));

		DP(BNX2X_MSG_IOV,
		   "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
		   vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
		   BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);

		/* init statically provisioned resources */
		bnx2x_iov_static_resc(bp, &vf->alloc_resc);

		/* queues are initialized during VF-ACQUIRE */

		/* reserve the vf vlan credit */
		bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf));

		vf->filter_state = 0;
		vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);

		/* init mcast object - This object will be re-initialized
		 * during VF-ACQUIRE with the proper cl_id and cid.
		 * It needs to be initialized here so that it can be safely
		 * handled by a subsequent FLR flow.
		 */
		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
				     0xFF, 0xFF, 0xFF,
				     bnx2x_vf_sp(bp, vf, mcast_rdata),
				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
				     BNX2X_FILTER_MCAST_PENDING,
				     &vf->filter_state,
				     BNX2X_OBJ_TYPE_RX_TX);

		/* set the mailbox message addresses */
		BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
			(((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
			MBX_MSG_ALIGNED_SIZE);

		BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
			vfid * MBX_MSG_ALIGNED_SIZE;

		/* Enable vf mailbox */
		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
	}

	/* Final VF init */
	qcount = 0;
	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);

		/* fill in the BDF and bars */
		vf->bus = bnx2x_vf_bus(bp, i);
		vf->devfn = bnx2x_vf_devfn(bp, i);
		bnx2x_vf_set_bars(bp, vf);

		DP(BNX2X_MSG_IOV,
		   "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
		   vf->abs_vfid, vf->bus, vf->devfn,
		   (unsigned)vf->bars[0].bar, vf->bars[0].size,
		   (unsigned)vf->bars[1].bar, vf->bars[1].size,
		   (unsigned)vf->bars[2].bar, vf->bars[2].size);

		/* set local queue arrays */
		vf->vfqs = &bp->vfdb->vfqs[qcount];
		qcount += bnx2x_vf(bp, i, alloc_resc.num_sbs);
	}

	return 0;
}

/* called by bnx2x_chip_cleanup */
int bnx2x_iov_chip_cleanup(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return 0;

	/* release all the VFs */
	for_each_vf(bp, i)
		bnx2x_vf_release(bp, BP_VF(bp, i), true); /* blocking */

	return 0;
}

/* called by bnx2x_init_hw_func, returns the next ilt line */
int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
{
	int i;
	struct bnx2x_ilt *ilt = BP_ILT(bp);

	if (!IS_SRIOV(bp))
		return line;

	/* set vfs ilt lines */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);

		ilt->lines[line+i].page = hw_cxt->addr;
		ilt->lines[line+i].page_mapping = hw_cxt->mapping;
		ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
	}
	return line + i;
}

static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid)
{
	return ((cid >= BNX2X_FIRST_VF_CID) &&
		((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS));
}
static void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
					       struct bnx2x_vf_queue *vfq,
					       union event_ring_elem *elem)
{
	unsigned long ramrod_flags = 0;
	int rc = 0;

	/* Always push next commands out, don't wait here */
	set_bit(RAMROD_CONT, &ramrod_flags);

	switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
	case BNX2X_FILTER_MAC_PENDING:
		rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
					   &ramrod_flags);
		break;
	case BNX2X_FILTER_VLAN_PENDING:
		rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
					    &ramrod_flags);
		break;
	default:
		BNX2X_ERR("Unsupported classification command: %d\n",
			  elem->message.data.eth_event.echo);
		return;
	}

	if (rc < 0)
		BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
	else if (rc > 0)
		DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
}
static void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
				      struct bnx2x_virtf *vf)
{
	struct bnx2x_mcast_ramrod_params rparam = {NULL};
	int rc;

	rparam.mcast_obj = &vf->mcast_obj;
	vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);

	/* If there are pending mcast commands - send them */
	if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
		if (rc < 0)
			BNX2X_ERR("Failed to send pending mcast commands: %d\n",
				  rc);
	}
}
static void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
					struct bnx2x_virtf *vf)
{
	smp_mb__before_clear_bit();
	clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
	smp_mb__after_clear_bit();
}
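/* EQ dispatch for VF-owned completions. A VF CID encodes the queue index
 * in its low BNX2X_VF_CID_WND bits and the abs_vfid in the bits above it,
 * so an event's CID alone is enough to route the completion to the right
 * VF and queue (see the extraction below).
 */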
int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
{
	struct bnx2x_virtf *vf;
	int qidx = 0, abs_vfid;
	u8 opcode;
	u16 cid = 0xffff;

	if (!IS_SRIOV(bp))
		return 1;

	/* first get the cid - the only events we handle here are cfc-delete
	 * and set-mac completion
	 */
	opcode = elem->message.opcode;

	switch (opcode) {
	case EVENT_RING_OPCODE_CFC_DEL:
		cid = SW_CID((__force __le32)
			     elem->message.data.cfc_del_event.cid);
		DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
		break;
	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
	case EVENT_RING_OPCODE_MULTICAST_RULES:
	case EVENT_RING_OPCODE_FILTERS_RULES:
		cid = (elem->message.data.eth_event.echo &
		       BNX2X_SWCID_MASK);
		DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
		break;
	case EVENT_RING_OPCODE_VF_FLR:
		abs_vfid = elem->message.data.vf_flr_event.vf_id;
		DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
		   abs_vfid);
		goto get_vf;
	case EVENT_RING_OPCODE_MALICIOUS_VF:
		abs_vfid = elem->message.data.malicious_vf_event.vf_id;
		DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
		   abs_vfid, elem->message.data.malicious_vf_event.err_id);
		goto get_vf;
	default:
		return 1;
	}

	/* check if the cid is the VF range */
	if (!bnx2x_iov_is_vf_cid(bp, cid)) {
		DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);
		return 1;
	}

	/* extract vf and rxq index from vf_cid - relies on the following:
	 * 1. vfid on cid reflects the true abs_vfid
	 * 2. The max number of VFs (per path) is 64
	 */
	qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
	abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
get_vf:
	vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf) {
		BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n",
			  cid, abs_vfid);
		return 0;
	}

	switch (opcode) {
	case EVENT_RING_OPCODE_CFC_DEL:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n",
		   vf->abs_vfid, qidx);
		vfq_get(vf, qidx)->sp_obj.complete_cmd(bp,
						       &vfq_get(vf, qidx)->sp_obj,
						       BNX2X_Q_CMD_CFC_DEL);
		break;
	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem);
		break;
	case EVENT_RING_OPCODE_MULTICAST_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_mcast_eqe(bp, vf);
		break;
	case EVENT_RING_OPCODE_FILTERS_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_filters_eqe(bp, vf);
		break;
	case EVENT_RING_OPCODE_VF_FLR:
		DP(BNX2X_MSG_IOV, "got VF [%d] FLR notification\n",
		   vf->abs_vfid);
		/* Do nothing for now */
		break;
	case EVENT_RING_OPCODE_MALICIOUS_VF:
		DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d error id %x\n",
		   abs_vfid, elem->message.data.malicious_vf_event.err_id);
		/* Do nothing for now */
		break;
	}
	/* SRIOV: reschedule any 'in_progress' operations */
	bnx2x_iov_sp_event(bp, cid, false);

	return 0;
}
static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
{
	/* extract the vf from vf_cid - relies on the following:
	 * 1. vfid on cid reflects the true abs_vfid
	 * 2. The max number of VFs (per path) is 64
	 */
	int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
	return bnx2x_vf_by_abs_fid(bp, abs_vfid);
}
void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
				struct bnx2x_queue_sp_obj **q_obj)
{
	struct bnx2x_virtf *vf;

	if (!IS_SRIOV(bp))
		return;

	vf = bnx2x_vf_by_cid(bp, vf_cid);

	if (vf) {
		/* extract queue index from vf_cid - relies on the following:
		 * 1. vfid on cid reflects the true abs_vfid
		 * 2. The max number of VFs (per path) is 64
		 */
		int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
		*q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
	} else {
		BNX2X_ERR("No vf matching cid %d\n", vf_cid);
	}
}
void bnx2x_iov_sp_event(struct bnx2x *bp, int vf_cid, bool queue_work)
{
	struct bnx2x_virtf *vf;

	/* check if the cid is the VF range */
	if (!IS_SRIOV(bp) || !bnx2x_iov_is_vf_cid(bp, vf_cid))
		return;

	vf = bnx2x_vf_by_cid(bp, vf_cid);
	if (vf) {
		/* set in_progress flag */
		atomic_set(&vf->op_in_progress, 1);
		if (queue_work)
			queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
	}
}
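/* The PF collects statistics on behalf of its VFs: for every enabled VF,
 * a STATS_TYPE_QUEUE query entry is appended after the PF's own (and
 * FCoE's) entries in the firmware statistics request.
 */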
void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
{
	int i;
	int first_queue_query_index, num_queues_req;
	dma_addr_t cur_data_offset;
	struct stats_query_entry *cur_query_entry;
	u8 stats_count = 0;
	bool is_fcoe = false;

	if (!IS_SRIOV(bp))
		return;

	if (!NO_FCOE(bp))
		is_fcoe = true;

	/* fcoe adds one global request and one queue request */
	num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe;
	first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
		(is_fcoe ? 0 : 1);

	DP(BNX2X_MSG_IOV,
	   "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
	   BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
	   first_queue_query_index + num_queues_req);

	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, queue_stats) +
		num_queues_req * sizeof(struct per_queue_stats);

	cur_query_entry = &bp->fw_stats_req->
		query[first_queue_query_index + num_queues_req];

	for_each_vf(bp, i) {
		int j;
		struct bnx2x_virtf *vf = BP_VF(bp, i);

		if (vf->state != VF_ENABLED) {
			DP(BNX2X_MSG_IOV,
			   "vf %d not enabled so no stats for it\n",
			   vf->abs_vfid);
			continue;
		}

		DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid);
		for_each_vfq(vf, j) {
			struct bnx2x_vf_queue *rxq = vfq_get(vf, j);

			/* collect stats from active queues only */
			if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
			    BNX2X_Q_LOGICAL_STATE_STOPPED)
				continue;

			/* create stats query entry for this queue */
			cur_query_entry->kind = STATS_TYPE_QUEUE;
			cur_query_entry->index = vfq_cl_id(vf, rxq);
			cur_query_entry->funcID =
				cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
			cur_query_entry->address.hi =
				cpu_to_le32(U64_HI(vf->fw_stat_map));
			cur_query_entry->address.lo =
				cpu_to_le32(U64_LO(vf->fw_stat_map));
			DP(BNX2X_MSG_IOV,
			   "added address %x %x for vf %d queue %d client %d\n",
			   cur_query_entry->address.hi,
			   cur_query_entry->address.lo, cur_query_entry->funcID,
			   j, cur_query_entry->index);
			cur_query_entry++;
			cur_data_offset += sizeof(struct per_queue_stats);
			stats_count++;
		}
	}
	bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
}
void bnx2x_iov_sp_task(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return;

	/* Iterate over all VFs and invoke state transition for VFs with
	 * 'in-progress' slow-path operations
	 */
	DP(BNX2X_MSG_IOV, "searching for pending vf operations\n");
	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);

		if (!list_empty(&vf->op_list_head) &&
		    atomic_read(&vf->op_in_progress)) {
			DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
			bnx2x_vfop_cur(bp, vf)->transition(bp, vf);
		}
	}
}
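/* Map an IGU status block id back to the VF that owns it: each VF owns a
 * contiguous range of vf_sb_count() status blocks starting at its
 * igu_base_id.
 */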
static inline
struct bnx2x_virtf *__vf_from_stat_id(struct bnx2x *bp, u8 stat_id)
{
	int i;
	struct bnx2x_virtf *vf = NULL;

	for_each_vf(bp, i) {
		vf = BP_VF(bp, i);
		if (stat_id >= vf->igu_base_id &&
		    stat_id < vf->igu_base_id + vf_sb_count(vf))
			break;
	}
	return vf;
}
/* VF API helpers */
static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
				u8 enable)
{
	u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4;
	u32 val = enable ? (abs_vfid | (1 << 6)) : 0;

	REG_WR(bp, reg, val);
}

static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i;

	for_each_vfq(vf, i)
		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
				    vfq_qzone_id(vf, vfq_get(vf, i)), false);
}
static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	u32 val;

	/* clear the VF configuration - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
	val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN |
		 IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK);
	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}
u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
		     BNX2X_VF_MAX_QUEUES);
}
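/* Resource accounting convention: a count of 0 in a VF's resource request
 * means "give me the maximum available", so empty rxq/txq counts fall back
 * to bnx2x_vf_max_queue_cnt() below.
 */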
static
int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
			    struct vf_pf_resc_request *req_resc)
{
	u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
	u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);

	return ((req_resc->num_rxqs <= rxq_cnt) &&
		(req_resc->num_txqs <= txq_cnt) &&
		(req_resc->num_sbs <= vf_sb_count(vf)) &&
		(req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
		(req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
}
int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
		     struct vf_pf_resc_request *resc)
{
	int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
		BNX2X_CIDS_PER_VF;

	union cdu_context *base_cxt = (union cdu_context *)
		BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
		(base_vf_cid & (ILT_PAGE_CIDS-1));
	int i;

	/* if state is 'acquired' the VF was not released or FLR'd, in
	 * this case the returned resources match the already acquired
	 * resources. Verify that the requested numbers do not exceed
	 * the already acquired numbers.
	 */
	if (vf->state == VF_ACQUIRED) {
		DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
		   vf->abs_vfid);

		if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
			BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= previously acquired numbers\n",
				  vf->abs_vfid);
			return -EINVAL;
		}
		return 0;
	}

	/* Otherwise vf state must be 'free' or 'reset' */
	if (vf->state != VF_FREE && vf->state != VF_RESET) {
		BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n",
			  vf->abs_vfid, vf->state);
		return -EINVAL;
	}

	/* static allocation:
	 * the global maximum numbers are fixed per VF. Fail the request if
	 * the requested numbers exceed these globals
	 */
	if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
		DP(BNX2X_MSG_IOV,
		   "cannot fulfill vf resource request. Placing maximal available values in response\n");
		/* set the max resource in the vf */
		return -ENOMEM;
	}

	/* Set resources counters - 0 request means max available */
	vf_sb_count(vf) = resc->num_sbs;
	vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
	vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
	if (resc->num_mac_filters)
		vf_mac_rules_cnt(vf) = resc->num_mac_filters;
	if (resc->num_vlan_filters)
		vf_vlan_rules_cnt(vf) = resc->num_vlan_filters;

	DP(BNX2X_MSG_IOV,
	   "Fulfilling vf request: sb count %d, rx_count %d, tx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
	   vf_sb_count(vf), vf_rxq_count(vf),
	   vf_txq_count(vf), vf_mac_rules_cnt(vf),
	   vf_vlan_rules_cnt(vf));

	/* Initialize the queues */
	if (!vf->vfqs) {
		DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n");
		return -EINVAL;
	}

	for_each_vfq(vf, i) {
		struct bnx2x_vf_queue *q = vfq_get(vf, i);

		if (!q) {
			DP(BNX2X_MSG_IOV, "q number %d was not allocated\n", i);
			return -EINVAL;
		}

		q->index = i;
		q->cxt = &((base_cxt + i)->eth);
		q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;

		DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n",
		   vf->abs_vfid, i, q->index, q->cid, q->cxt);

		/* init SP objects */
		bnx2x_vfq_init(bp, vf, q);
	}
	vf->state = VF_ACQUIRED;
	return 0;
}
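/* VF enable sequence: init the VF's status blocks, run the firmware
 * function init, open GRC/IGU access and traffic, program the queue
 * protection table and finally publish the bulletin board.
 */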
int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
{
	struct bnx2x_func_init_params func_init = {0};
	u16 flags = 0;
	int i;

	/* the sb resources are initialized at this point, do the
	 * FW/HW initializations
	 */
	for_each_vf_sb(vf, i)
		bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true,
			      vf_igu_sb(vf, i), vf_igu_sb(vf, i));

	/* Sanity checks */
	if (vf->state != VF_ACQUIRED) {
		DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n",
		   vf->abs_vfid, vf->state);
		return -EINVAL;
	}
	/* let FLR complete ... */
	msleep(100);

	/* FLR cleanup epilogue */
	if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid))
		return -EBUSY;

	/* reset IGU VF statistics: MSIX */
	REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4, 0);

	/* vf init */
	if (vf->cfg_flags & VF_CFG_STATS)
		flags |= (FUNC_FLG_STATS | FUNC_FLG_SPQ);

	if (vf->cfg_flags & VF_CFG_TPA)
		flags |= FUNC_FLG_TPA;

	if (is_vf_multi(vf))
		flags |= FUNC_FLG_RSS;

	/* function setup */
	func_init.func_flgs = flags;
	func_init.pf_id = BP_FUNC(bp);
	func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
	func_init.fw_stat_map = vf->fw_stat_map;
	func_init.spq_map = vf->spq_map;
	func_init.spq_prod = 0;
	bnx2x_func_init(bp, &func_init);

	/* Enable the vf */
	bnx2x_vf_enable_access(bp, vf->abs_vfid);
	bnx2x_vf_enable_traffic(bp, vf);

	/* queue protection table */
	for_each_vfq(vf, i)
		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
				    vfq_qzone_id(vf, vfq_get(vf, i)), true);

	vf->state = VF_ENABLED;

	/* update vf bulletin board */
	bnx2x_post_vf_bulletin(bp, vf->index);

	return 0;
}
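/* The VFOP state machines below are re-entered via the command's 'done'
 * callback: each asynchronous sub-command is issued with cmd.done pointing
 * back at the state handler, which resumes from the state recorded in
 * vfop->state once the sub-command completes.
 */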
/* VFOP close (teardown the queues, delete mcasts and close HW) */
static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_args_qx *qx = &vfop->args.qx;
	enum bnx2x_vfop_close_state state = vfop->state;
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_close,
		.block = false,
	};

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);

	switch (state) {
	case BNX2X_VFOP_CLOSE_QUEUES:

		if (++(qx->qid) < vf_rxq_count(vf)) {
			vfop->rc = bnx2x_vfop_qdown_cmd(bp, vf, &cmd, qx->qid);
			if (vfop->rc)
				goto op_err;
			return;
		}

		/* remove multicasts */
		vfop->state = BNX2X_VFOP_CLOSE_HW;
		vfop->rc = bnx2x_vfop_mcast_cmd(bp, vf, &cmd, NULL, 0, false);
		if (vfop->rc)
			goto op_err;
		return;

	case BNX2X_VFOP_CLOSE_HW:

		/* disable the interrupts */
		DP(BNX2X_MSG_IOV, "disabling igu\n");
		bnx2x_vf_igu_disable(bp, vf);

		/* disable the VF */
		DP(BNX2X_MSG_IOV, "clearing qtbl\n");
		bnx2x_vf_clr_qtbl(bp, vf);

		goto op_done;
	default:
		bnx2x_vfop_default(state);
	}
op_err:
	BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc);
op_done:
	vf->state = VF_ACQUIRED;
	DP(BNX2X_MSG_IOV, "set state to acquired\n");
	bnx2x_vfop_end(bp, vf, vfop);
}

int bnx2x_vfop_close_cmd(struct bnx2x *bp,
			 struct bnx2x_virtf *vf,
			 struct bnx2x_vfop_cmd *cmd)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	if (vfop) {
		vfop->args.qx.qid = -1; /* loop */
		bnx2x_vfop_opset(BNX2X_VFOP_CLOSE_QUEUES,
				 bnx2x_vfop_close, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_close,
					     cmd->block);
	}
	return -ENOMEM;
}
/* VF release can be called either: 1. the VF was acquired but
 * not enabled, or 2. the VF was enabled or in the process of being
 * enabled
 */
static void bnx2x_vfop_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
	struct bnx2x_vfop_cmd cmd = {
		.done = bnx2x_vfop_release,
		.block = false,
	};

	DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);

	if (vfop->rc < 0)
		goto op_err;

	DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid,
	   vf->state == VF_FREE ? "Free" :
	   vf->state == VF_ACQUIRED ? "Acquired" :
	   vf->state == VF_ENABLED ? "Enabled" :
	   vf->state == VF_RESET ? "Reset" :
	   "Unknown");

	switch (vf->state) {
	case VF_ENABLED:
		vfop->rc = bnx2x_vfop_close_cmd(bp, vf, &cmd);
		if (vfop->rc)
			goto op_err;
		return;

	case VF_ACQUIRED:
		DP(BNX2X_MSG_IOV, "about to free resources\n");
		bnx2x_vf_free_resc(bp, vf);
		DP(BNX2X_MSG_IOV, "vfop->rc %d\n", vfop->rc);
		goto op_done;

	case VF_FREE:
	case VF_RESET:
		/* do nothing */
		goto op_done;
	default:
		bnx2x_vfop_default(vf->state);
	}
op_err:
	BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, vfop->rc);
op_done:
	bnx2x_vfop_end(bp, vf, vfop);
}

int bnx2x_vfop_release_cmd(struct bnx2x *bp,
			   struct bnx2x_virtf *vf,
			   struct bnx2x_vfop_cmd *cmd)
{
	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
	if (vfop) {
		bnx2x_vfop_opset(-1, /* use vf->state */
				 bnx2x_vfop_release, cmd->done);
		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_release,
					     cmd->block);
	}
	return -ENOMEM;
}
/* VF release ~ VF close + VF release-resources
 * Release is the ultimate SW shutdown and is called whenever an
 * irrecoverable error is encountered.
 */
void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block)
{
	struct bnx2x_vfop_cmd cmd = {
		.done = NULL,
		.block = block,
	};
	int rc;

	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);

	rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
	if (rc)
		WARN(rc,
		     "VF[%d] Failed to allocate resources for release op - rc=%d\n",
		     vf->abs_vfid, rc);
}
static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,
				     struct bnx2x_virtf *vf, u32 *sbdf)
{
	*sbdf = vf->devfn | (vf->bus << 8);
}

static inline void bnx2x_vf_get_bars(struct bnx2x *bp, struct bnx2x_virtf *vf,
				     struct bnx2x_vf_bar_info *bar_info)
{
	int n;

	bar_info->nr_bars = bp->vfdb->sriov.nres;
	for (n = 0; n < bar_info->nr_bars; n++)
		bar_info->bars[n] = vf->bars[n];
}
void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      enum channel_tlvs tlv)
{
	/* lock the channel */
	mutex_lock(&vf->op_mutex);

	/* record the locking op */
	vf->op_current = tlv;

	/* log the lock */
	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n",
	   vf->abs_vfid, tlv);
}

void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
				enum channel_tlvs expected_tlv)
{
	WARN(expected_tlv != vf->op_current,
	     "lock mismatch: expected %d found %d", expected_tlv,
	     vf->op_current);

	/* unlock the channel */
	mutex_unlock(&vf->op_mutex);

	/* log the unlock */
	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
	   vf->abs_vfid, vf->op_current);

	/* clear the locking op */
	vf->op_current = CHANNEL_TLV_NONE;
}
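/* Entry point for the PCI sysfs 'sriov_numvfs' knob: validates that the PF
 * is up, clamps the request to the total_vfs advertised in the SR-IOV
 * capability and enables or disables SR-IOV accordingly.
 */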
int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
{
	struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));

	DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
	   num_vfs_param, BNX2X_NR_VIRTFN(bp));

	/* HW channel is only operational when PF is up */
	if (bp->state != BNX2X_STATE_OPEN) {
		BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n");
		return -EINVAL;
	}

	/* we are always bound by the total_vfs in the configuration space */
	if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) {
		BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n",
			  num_vfs_param, BNX2X_NR_VIRTFN(bp));
		num_vfs_param = BNX2X_NR_VIRTFN(bp);
	}

	bp->requested_nr_virtfn = num_vfs_param;
	if (num_vfs_param == 0) {
		pci_disable_sriov(dev);
		return 0;
	} else {
		return bnx2x_enable_sriov(bp);
	}
}
int bnx2x_enable_sriov(struct bnx2x *bp)
{
	int rc = 0, req_vfs = bp->requested_nr_virtfn;

	rc = pci_enable_sriov(bp->pdev, req_vfs);
	if (rc) {
		BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
		return rc;
	}
	DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs);
	return req_vfs;
}
void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
{
	int vfidx;
	struct pf_vf_bulletin_content *bulletin;

	DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n");
	for_each_vf(bp, vfidx) {
		bulletin = BP_VF_BULLETIN(bp, vfidx);
		if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN)
			bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0);
	}
}
void bnx2x_disable_sriov(struct bnx2x *bp)
{
	pci_disable_sriov(bp->pdev);
}
static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx,
			       struct bnx2x_virtf *vf)
{
	if (bp->state != BNX2X_STATE_OPEN) {
		BNX2X_ERR("vf ndo called while PF is down\n");
		return -EINVAL;
	}

	if (!IS_SRIOV(bp)) {
		BNX2X_ERR("vf ndo called while sriov is disabled\n");
		return -EINVAL;
	}

	if (vfidx >= BNX2X_NR_VIRTFN(bp)) {
		BNX2X_ERR("vf ndo called for uninitialized VF. vfidx was %d BNX2X_NR_VIRTFN was %d\n",
			  vfidx, BNX2X_NR_VIRTFN(bp));
		return -EINVAL;
	}

	if (!vf) {
		BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n",
			  vfidx);
		return -EINVAL;
	}

	return 0;
}
int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
			struct ifla_vf_info *ivi)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
	struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
	struct bnx2x_vlan_mac_obj *vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj);
	struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
	int rc;

	/* sanity */
	rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
	if (rc)
		return rc;
	if (!mac_obj || !vlan_obj || !bulletin) {
		BNX2X_ERR("VF partially initialized\n");
		return -EINVAL;
	}

	ivi->vf = vfidx;
	ivi->qos = 0;
	ivi->tx_rate = 10000; /* always 10G. TBA take from link struct */
	ivi->spoofchk = 1; /* always enabled */
	if (vf->state == VF_ENABLED) {
		/* mac and vlan are in vlan_mac objects */
		mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
					0, ETH_ALEN);
		vlan_obj->get_n_elements(bp, vlan_obj, 1, (u8 *)&ivi->vlan,
					 0, VLAN_HLEN);
	} else {
		/* mac */
		if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
			/* mac configured by ndo so it's in bulletin board */
			memcpy(&ivi->mac, bulletin->mac, ETH_ALEN);
		else
			/* function has not been loaded yet. Show mac as 0s */
			memset(&ivi->mac, 0, ETH_ALEN);

		/* vlan */
		if (bulletin->valid_bitmap & (1 << VLAN_VALID))
			/* vlan configured by ndo so it's in bulletin board */
			memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN);
		else
			/* function has not been loaded yet. Show vlans as 0s */
			memset(&ivi->vlan, 0, VLAN_HLEN);
	}

	return 0;
}
/* New mac for VF. Consider these cases:
 * 1. VF hasn't been acquired yet - save the mac in the local bulletin board
 *    and supply it at acquire.
 * 2. VF has already been acquired but has not yet initialized - store in the
 *    local bulletin board. The mac will be posted on the VF bulletin board
 *    after VF init. The VF will configure this mac when it is ready.
 * 3. VF has already initialized but has not yet set up a queue - post the new
 *    mac on the VF's bulletin board right now. The VF will configure this mac
 *    when it is ready.
 * 4. VF has already set up a queue - delete any macs already configured for
 *    this queue and manually configure the new mac.
 * In any event, once this function has been called refuse any attempts by the
 * VF to configure any mac for itself except for this mac. In case of a race
 * where the VF fails to see the new post on its bulletin board before sending
 * a mac configuration request, the PF will simply fail the request and the VF
 * can try again after consulting its bulletin board.
 */
int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc, q_logical_state;
	struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
	struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);

	/* sanity */
	rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
	if (rc)
		return rc;
	if (!is_valid_ether_addr(mac)) {
		BNX2X_ERR("mac address invalid\n");
		return -EINVAL;
	}

	/* update PF's copy of the VF's bulletin. Will no longer accept mac
	 * configuration requests from the vf unless they match this mac
	 */
	bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
	memcpy(bulletin->mac, mac, ETH_ALEN);

	/* Post update on VF's bulletin board */
	rc = bnx2x_post_vf_bulletin(bp, vfidx);
	if (rc) {
		BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
		return rc;
	}

	/* is vf initialized and queue set up? */
	q_logical_state =
		bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj));
	if (vf->state == VF_ENABLED &&
	    q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
		/* configure the mac in device on this vf's queue */
		unsigned long ramrod_flags = 0;
		struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);

		/* must lock vfpf channel to protect against vf flows */
		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);

		/* remove existing eth macs */
		rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
		if (rc) {
			BNX2X_ERR("failed to delete eth macs\n");
			return -EINVAL;
		}

		/* remove existing uc list macs */
		rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
		if (rc) {
			BNX2X_ERR("failed to delete uc_list macs\n");
			return -EINVAL;
		}

		/* configure the new mac to device */
		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
		bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
				  BNX2X_ETH_MAC, &ramrod_flags);

		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
	}

	return 0;
}
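/* Set a VF vlan from the ndo. A non-zero vlan is installed as the queue's
 * default vlan with silent vlan removal; vlan 0 leaves VF traffic untagged
 * and incoming tags untouched (see the vlan == 0 branch below).
 */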
int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc, q_logical_state;
	struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
	struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);

	/* sanity */
	rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
	if (rc)
		return rc;

	if (vlan > 4095) {
		BNX2X_ERR("illegal vlan value %d\n", vlan);
		return -EINVAL;
	}

	DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n",
	   vfidx, vlan, 0);

	/* update PF's copy of the VF's bulletin. No point in posting the vlan
	 * to the VF since it doesn't have anything to do with it. But it is
	 * useful to store it here in case the VF is not up yet, so the vlan
	 * can be configured later when it comes up.
	 */
	bulletin->valid_bitmap |= 1 << VLAN_VALID;
	bulletin->vlan = vlan;

	/* is vf initialized and queue set up? */
	q_logical_state =
		bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj));
	if (vf->state == VF_ENABLED &&
	    q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
		/* configure the vlan in device on this vf's queue */
		unsigned long ramrod_flags = 0;
		unsigned long vlan_mac_flags = 0;
		struct bnx2x_vlan_mac_obj *vlan_obj =
			&bnx2x_vfq(vf, 0, vlan_obj);
		struct bnx2x_vlan_mac_ramrod_params ramrod_param;
		struct bnx2x_queue_state_params q_params = {NULL};
		struct bnx2x_queue_update_params *update_params;

		memset(&ramrod_param, 0, sizeof(ramrod_param));

		/* must lock vfpf channel to protect against vf flows */
		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);

		/* remove existing vlans */
		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
		rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
					  &ramrod_flags);
		if (rc) {
			BNX2X_ERR("failed to delete vlans\n");
			return -EINVAL;
		}

		/* send queue update ramrod to configure default vlan and
		 * silent vlan removal
		 */
		__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
		q_params.cmd = BNX2X_Q_CMD_UPDATE;
		q_params.q_obj = &bnx2x_vfq(vf, 0, sp_obj);
		update_params = &q_params.params.update;
		__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
			  &update_params->update_flags);
		__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
			  &update_params->update_flags);

		if (vlan == 0) {
			/* if vlan is 0 then we want to leave the VF traffic
			 * untagged, and leave the incoming traffic untouched
			 * (i.e. do not remove any vlan tags).
			 */
			__clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
				    &update_params->update_flags);
			__clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
				    &update_params->update_flags);
		} else {
			/* configure the new vlan to device */
			__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
			ramrod_param.vlan_mac_obj = vlan_obj;
			ramrod_param.ramrod_flags = ramrod_flags;
			ramrod_param.user_req.u.vlan.vlan = vlan;
			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
			rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
			if (rc) {
				BNX2X_ERR("failed to configure vlan\n");
				return -EINVAL;
			}

			/* configure default vlan to vf queue and set silent
			 * vlan removal (the vf remains unaware of this vlan).
			 */
			update_params = &q_params.params.update;
			__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
				  &update_params->update_flags);
			__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
				  &update_params->update_flags);
			update_params->def_vlan = vlan;
		}

		/* Update the Queue state */
		rc = bnx2x_queue_state_change(bp, &q_params);
		if (rc) {
			BNX2X_ERR("Failed to configure default VLAN\n");
			return rc;
		}

		/* clear the flag indicating that this VF needs its vlan
		 * (will only be set if the HV configured the vlan before the
		 * vf was up and we were called because the VF came up later)
		 */
		vf->cfg_flags &= ~VF_CFG_VLAN;

		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
	}
	return 0;
}
/* crc is the first field in the bulletin board. Compute the crc over the
 * entire bulletin board excluding the crc field itself. Use the length field
 * as the Bulletin Board was posted by a PF with possibly a different version
 * from the vf which will sample it. Therefore, the length is computed by the
 * PF and then used blindly by the VF.
 */
u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
			  struct pf_vf_bulletin_content *bulletin)
{
	return crc32(BULLETIN_CRC_SEED,
		     ((u8 *)bulletin) + sizeof(bulletin->crc),
		     bulletin->length - sizeof(bulletin->crc));
}
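/* The bulletin board is written by the PF while the VF may be reading it,
 * so a sample taken mid-post can be torn; the crc check below is retried
 * up to BULLETIN_ATTEMPTS times before giving up.
 */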
/* Check for new posts on the bulletin board */
enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
{
	struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
	int attempts;

	/* bulletin board hasn't changed since last sample */
	if (bp->old_bulletin.version == bulletin.version)
		return PFVF_BULLETIN_UNCHANGED;

	/* validate crc of new bulletin board */
	if (bp->old_bulletin.version != bp->pf2vf_bulletin->content.version) {
		/* sampling structure mid post may result in corrupted data;
		 * validate crc to ensure coherency.
		 */
		for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) {
			bulletin = bp->pf2vf_bulletin->content;
			if (bulletin.crc == bnx2x_crc_vf_bulletin(bp,
								  &bulletin))
				break;
			BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n",
				  bulletin.crc,
				  bnx2x_crc_vf_bulletin(bp, &bulletin));
		}
		if (attempts >= BULLETIN_ATTEMPTS) {
			BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n",
				  attempts);
			return PFVF_BULLETIN_CRC_ERR;
		}
	}

	/* the mac address in bulletin board is valid and is new */
	if (bulletin.valid_bitmap & 1 << MAC_ADDR_VALID &&
	    memcmp(bulletin.mac, bp->old_bulletin.mac, ETH_ALEN)) {
		/* update new mac to net device */
		memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
	}

	/* the vlan in bulletin board is valid and is new */
	if (bulletin.valid_bitmap & 1 << VLAN_VALID)
		memcpy(&bulletin.vlan, &bp->old_bulletin.vlan, VLAN_HLEN);

	/* copy new bulletin board to bp */
	bp->old_bulletin = bulletin;

	return PFVF_BULLETIN_UPDATED;
}
void bnx2x_timer_sriov(struct bnx2x *bp)
{
	bnx2x_sample_bulletin(bp);

	/* if channel is down we need to self destruct */
	if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) {
		smp_mb__before_clear_bit();
		set_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
			&bp->sp_rtnl_state);
		smp_mb__after_clear_bit();
		schedule_delayed_work(&bp->sp_rtnl_task, 0);
	}
}
void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
{
	/* vf doorbells are embedded within the regview */
	return bp->regview + PXP_VF_ADDR_DB_START;
}
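/* Note: the BNX2X_PCI_ALLOC() macro branches to the local alloc_mem_err
 * label on allocation failure, which is how the cleanup path below is
 * reached without an explicit goto.
 */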
int bnx2x_vf_pci_alloc(struct bnx2x *bp)
{
	mutex_init(&bp->vf2pf_mutex);

	/* allocate vf2pf mailbox for vf to pf channel */
	BNX2X_PCI_ALLOC(bp->vf2pf_mbox, &bp->vf2pf_mbox_mapping,
			sizeof(struct bnx2x_vf_mbx_msg));

	/* allocate pf 2 vf bulletin board */
	BNX2X_PCI_ALLOC(bp->pf2vf_bulletin, &bp->pf2vf_bulletin_mapping,
			sizeof(union pf_vf_bulletin));

	return 0;

alloc_mem_err:
	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
		       sizeof(struct bnx2x_vf_mbx_msg));
	/* free the bulletin board itself, not the mailbox, with its mapping */
	BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping,
		       sizeof(union pf_vf_bulletin));
	return -ENOMEM;
}
int bnx2x_open_epilog(struct bnx2x *bp)
{
	/* Enable sriov via delayed work. This must be done via delayed work
	 * because it causes the probe of the vf devices to be run, which
	 * invokes register_netdevice, which must have the rtnl lock taken.
	 * As we are holding the lock right now, that could only work if the
	 * probe would not take the lock. However, as the probe of the vf may
	 * be called from other contexts as well (such as passthrough to a vm
	 * failing) it cannot assume the lock is being held for it. Using
	 * delayed work here allows the probe code to simply take the lock
	 * (i.e. wait for it to be released if it is being held). We only
	 * want to do this if the number of VFs was set before the PF driver
	 * was loaded.
	 */
	if (IS_SRIOV(bp) && BNX2X_NR_VIRTFN(bp)) {
		smp_mb__before_clear_bit();
		set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state);
		smp_mb__after_clear_bit();
		schedule_delayed_work(&bp->sp_rtnl_task, 0);
	}

	return 0;
}
void bnx2x_iov_channel_down(struct bnx2x *bp)
{
	int vf_idx;
	struct pf_vf_bulletin_content *bulletin;

	if (!IS_SRIOV(bp))
		return;

	for_each_vf(bp, vf_idx) {
		/* locate this VF's bulletin board and update the channel down
		 * bit
		 */
		bulletin = BP_VF_BULLETIN(bp, vf_idx);
		bulletin->valid_bitmap |= 1 << CHANNEL_DOWN;

		/* update vf bulletin board */
		bnx2x_post_vf_bulletin(bp, vf_idx);
	}
}