/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2015 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"

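/* Everything below is the PF-side SR-IOV implementation and is only
 * built when CONFIG_BNXT_SRIOV is set; stubs for the entry points used
 * by the rest of the driver are provided at the end of this file.
 */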
#ifdef CONFIG_BNXT_SRIOV
static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
{
	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
		netdev_err(bp->dev, "vf ndo called though PF is down\n");
		return -EINVAL;
	}
	if (!bp->pf.active_vfs) {
		netdev_err(bp->dev, "vf ndo called though sriov is disabled\n");
		return -EINVAL;
	}
	if (vf_id >= bp->pf.max_vfs) {
		netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
		return -EINVAL;
	}
	return 0;
}

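/* .ndo_set_vf_spoofchk handler: enable or disable firmware source MAC
 * checking for a VF.  The cached VF flags are updated only when the
 * HWRM_FUNC_CFG request succeeds.
 */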
int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	bool old_setting = false;
	u32 func_flags;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	if (vf->flags & BNXT_VF_SPOOFCHK)
		old_setting = true;
	if (old_setting == setting)
		return 0;

	func_flags = vf->func_flags;
	if (setting)
		func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK;
	else
		func_flags &= ~FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK;
	/* TODO: if the driver supports VLAN filter on guest VLAN,
	 * the spoof check should also include vlan anti-spoofing
	 */
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(func_flags);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		vf->func_flags = func_flags;
		if (setting)
			vf->flags |= BNXT_VF_SPOOFCHK;
		else
			vf->flags &= ~BNXT_VF_SPOOFCHK;
	}
	return rc;
}

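/* .ndo_get_vf_config handler: report the PF's cached view of the VF's
 * MAC address, tx rates, VLAN, spoof-check and link state.
 */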
int bnxt_get_vf_config(struct net_device *dev, int vf_id,
		       struct ifla_vf_info *ivi)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	ivi->vf = vf_id;
	vf = &bp->pf.vf[vf_id];

	memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
	ivi->max_tx_rate = vf->max_tx_rate;
	ivi->min_tx_rate = vf->min_tx_rate;
	ivi->vlan = vf->vlan;
	ivi->qos = vf->flags & BNXT_VF_QOS;
	ivi->spoofchk = vf->flags & BNXT_VF_SPOOFCHK;
	if (!(vf->flags & BNXT_VF_LINK_FORCED))
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->flags & BNXT_VF_LINK_UP)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;

	return 0;
}

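/* .ndo_set_vf_mac handler: set the VF's default MAC address through
 * HWRM_FUNC_CFG.  A zero MAC is allowed and means the VF may use its
 * own address.
 */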
int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;
	/* reject bc or mc mac addr, zero mac addr means allow
	 * VF to use its own mac addr
	 */
	if (is_multicast_ether_addr(mac)) {
		netdev_err(dev, "Invalid VF ethernet address\n");
		return -EINVAL;
	}
	vf = &bp->pf.vf[vf_id];

	memcpy(vf->mac_addr, mac, ETH_ALEN);
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(vf->func_flags);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

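/* .ndo_set_vf_vlan handler: set the default VLAN for the VF.  User
 * priority (qos) is not supported yet, so a non-zero value is rejected.
 */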
int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	u16 vlan_tag;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	/* TODO: proper handling of user priority is needed; for now, fail
	 * the command if a valid priority is given
	 */
	if (vlan_id > 4095 || qos)
		return -EINVAL;

	vf = &bp->pf.vf[vf_id];
	vlan_tag = vlan_id;
	if (vlan_tag == vf->vlan)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(vf->func_flags);
	req.dflt_vlan = cpu_to_le16(vlan_tag);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		vf->vlan = vlan_tag;
	return rc;
}

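/* .ndo_set_vf_rate handler: validate the requested min/max tx rates
 * against the PF link speed, then program them with HWRM_FUNC_CFG.
 */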
int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
		   int max_tx_rate)
{
	struct hwrm_func_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	u32 pf_link_speed;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];
	pf_link_speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
	if (max_tx_rate > pf_link_speed) {
		netdev_info(bp->dev, "max tx rate %d exceeds PF link speed for VF %d\n",
			    max_tx_rate, vf_id);
		return -EINVAL;
	}

	if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) {
		netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
			    min_tx_rate, vf_id);
		return -EINVAL;
	}
	if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
		return 0;
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
	req.fid = cpu_to_le16(vf->fw_fid);
	req.flags = cpu_to_le32(vf->func_flags);
	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
	req.max_bw = cpu_to_le32(max_tx_rate);
	req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
	req.min_bw = cpu_to_le32(min_tx_rate);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		vf->min_tx_rate = min_tx_rate;
		vf->max_tx_rate = max_tx_rate;
	}
	return rc;
}

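/* .ndo_set_vf_link_state handler: record the requested link state in
 * the VF flags; "auto" follows the physical link, while "enable" and
 * "disable" force the state reported to the VF.
 */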
int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vf_info *vf;
	int rc;

	rc = bnxt_vf_ndo_prep(bp, vf_id);
	if (rc)
		return rc;

	vf = &bp->pf.vf[vf_id];

	vf->flags &= ~(BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED);
	switch (link) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->flags |= BNXT_VF_LINK_UP;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->flags |= BNXT_VF_LINK_FORCED;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->flags |= BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED;
		break;
	default:
		netdev_err(bp->dev, "Invalid link option\n");
		rc = -EINVAL;
		break;
	}
	/* CHIMP TODO: send msg to VF to update new link state */

	return rc;
}

static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
{
	int i;
	struct bnxt_vf_info *vf;

	for (i = 0; i < num_vfs; i++) {
		vf = &bp->pf.vf[i];
		memset(vf, 0, sizeof(*vf));
		vf->flags = BNXT_VF_QOS | BNXT_VF_LINK_UP;
	}
	return 0;
}

static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs)
{
	int i, rc = 0;
	struct bnxt_pf_info *pf = &bp->pf;
	struct hwrm_func_vf_resc_free_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) {
		req.vf_id = cpu_to_le16(i);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

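/* Free the per-VF state array, the VF event bitmap and the DMA pages
 * used to mirror forwarded VF HWRM requests into PF memory.
 */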
static void bnxt_free_vf_resources(struct bnxt *bp)
{
	struct pci_dev *pdev = bp->pdev;
	int i;

	kfree(bp->pf.vf_event_bmap);
	bp->pf.vf_event_bmap = NULL;

	for (i = 0; i < 4; i++) {
		if (bp->pf.hwrm_cmd_req_addr[i]) {
			dma_free_coherent(&pdev->dev, BNXT_PAGE_SIZE,
					  bp->pf.hwrm_cmd_req_addr[i],
					  bp->pf.hwrm_cmd_req_dma_addr[i]);
			bp->pf.hwrm_cmd_req_addr[i] = NULL;
		}
	}

	kfree(bp->pf.vf);
	bp->pf.vf = NULL;
}

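/* Allocate the per-VF state array plus enough DMA-coherent pages to
 * hold one HWRM request buffer per VF, then assign each VF its slice
 * of those pages.
 */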
static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
{
	struct pci_dev *pdev = bp->pdev;
	u32 nr_pages, size, i, j, k = 0;

	bp->pf.vf = kcalloc(num_vfs, sizeof(struct bnxt_vf_info), GFP_KERNEL);
	if (!bp->pf.vf)
		return -ENOMEM;

	bnxt_set_vf_attr(bp, num_vfs);

	size = num_vfs * BNXT_HWRM_REQ_MAX_SIZE;
	nr_pages = size / BNXT_PAGE_SIZE;
	if (size & (BNXT_PAGE_SIZE - 1))
		nr_pages++;

	for (i = 0; i < nr_pages; i++) {
		bp->pf.hwrm_cmd_req_addr[i] =
			dma_alloc_coherent(&pdev->dev, BNXT_PAGE_SIZE,
					   &bp->pf.hwrm_cmd_req_dma_addr[i],
					   GFP_KERNEL);

		if (!bp->pf.hwrm_cmd_req_addr[i])
			return -ENOMEM;

		for (j = 0; j < BNXT_HWRM_REQS_PER_PAGE && k < num_vfs; j++) {
			struct bnxt_vf_info *vf = &bp->pf.vf[k];

			vf->hwrm_cmd_req_addr = bp->pf.hwrm_cmd_req_addr[i] +
						j * BNXT_HWRM_REQ_MAX_SIZE;
			vf->hwrm_cmd_req_dma_addr =
				bp->pf.hwrm_cmd_req_dma_addr[i] + j *
				BNXT_HWRM_REQ_MAX_SIZE;
			k++;
		}
	}

	/* Max 128 VFs */
	bp->pf.vf_event_bmap = kzalloc(16, GFP_KERNEL);
	if (!bp->pf.vf_event_bmap)
		return -ENOMEM;

	bp->pf.hwrm_cmd_req_pages = nr_pages;
	return 0;
}

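/* Register the VF command buffer pages with the firmware so that it
 * can DMA forwarded VF requests into PF memory.
 */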
static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
{
	struct hwrm_func_buf_rgtr_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1);

	req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
	req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
	req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
	req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
	req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
	req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
	req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

/* Only called by the PF to reserve resources for VFs */
static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
{
	u32 rc = 0, mtu, i;
	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
	u16 vf_ring_grps;
	struct hwrm_func_cfg_input req = {0};
	struct bnxt_pf_info *pf = &bp->pf;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);

	/* Remaining rings are distributed equally among VFs for now */
	/* TODO: the following workaround is needed to restrict the total
	 * number of vf_cp_rings so it does not exceed the number of HW ring
	 * groups. This WA should be removed once new HWRM provides HW ring
	 * groups capability in hwrm_func_qcap.
	 */
	vf_cp_rings = min_t(u16, pf->max_cp_rings, pf->max_stat_ctxs);
	vf_cp_rings = (vf_cp_rings - bp->cp_nr_rings) / num_vfs;
	/* TODO: restore this logic below once the WA above is removed */
	/* vf_cp_rings = (pf->max_cp_rings - bp->cp_nr_rings) / num_vfs; */
	vf_stat_ctx = (pf->max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings * 2) /
			      num_vfs;
	else
		vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings) / num_vfs;
	vf_ring_grps = (bp->pf.max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
	vf_tx_rings = (pf->max_tx_rings - bp->tx_nr_rings) / num_vfs;

	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
				  FUNC_CFG_REQ_ENABLES_MRU |
				  FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
				  FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
				  FUNC_CFG_REQ_ENABLES_NUM_VNICS |
				  FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);

	mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	req.mru = cpu_to_le16(mtu);
	req.mtu = cpu_to_le16(mtu);

	req.num_rsscos_ctxs = cpu_to_le16(1);
	req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
	req.num_tx_rings = cpu_to_le16(vf_tx_rings);
	req.num_rx_rings = cpu_to_le16(vf_rx_rings);
	req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
	req.num_l2_ctxs = cpu_to_le16(4);
	vf_vnics = 1;

	req.num_vnics = cpu_to_le16(vf_vnics);
	/* FIXME spec currently uses 1 bit for stats ctx */
	req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < num_vfs; i++) {
		req.fid = cpu_to_le16(pf->first_vf_id + i);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
		pf->active_vfs = i + 1;
		pf->vf[i].fw_fid = le16_to_cpu(req.fid);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	if (!rc) {
		pf->max_tx_rings -= vf_tx_rings * num_vfs;
		pf->max_rx_rings -= vf_rx_rings * num_vfs;
		pf->max_hw_ring_grps -= vf_ring_grps * num_vfs;
		pf->max_cp_rings -= vf_cp_rings * num_vfs;
		pf->max_rsscos_ctxs -= num_vfs;
		pf->max_stat_ctxs -= vf_stat_ctx * num_vfs;
		pf->max_vnics -= vf_vnics * num_vfs;
	}
	return rc;
}

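/* Work out how many VFs can actually be supported with the rings and
 * RSS contexts left over by the PF, then allocate and reserve the
 * resources and enable SR-IOV on the PCI device.
 */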
static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
{
	int rc = 0, vfs_supported;
	int min_rx_rings, min_tx_rings, min_rss_ctxs;
	int tx_ok = 0, rx_ok = 0, rss_ok = 0;

	/* Check if we can enable the requested num of vf's. At a minimum
	 * we require 1 RX and 1 TX ring for each VF. In this minimum conf
	 * features like TPA will not be available.
	 */
	vfs_supported = *num_vfs;

	while (vfs_supported) {
		min_rx_rings = vfs_supported;
		min_tx_rings = vfs_supported;
		min_rss_ctxs = vfs_supported;

		if (bp->flags & BNXT_FLAG_AGG_RINGS) {
			if (bp->pf.max_rx_rings - bp->rx_nr_rings * 2 >=
			    min_rx_rings)
				rx_ok = 1;
		} else {
			if (bp->pf.max_rx_rings - bp->rx_nr_rings >=
			    min_rx_rings)
				rx_ok = 1;
		}

		if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings)
			tx_ok = 1;

		if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs)
			rss_ok = 1;

		if (tx_ok && rx_ok && rss_ok)
			break;

		vfs_supported--;
	}

	if (!vfs_supported) {
		netdev_err(bp->dev, "Cannot enable VFs as all resources are used by PF\n");
		return -EINVAL;
	}

	if (vfs_supported != *num_vfs) {
		netdev_info(bp->dev, "Requested VFs %d, can enable %d\n",
			    *num_vfs, vfs_supported);
		*num_vfs = vfs_supported;
	}

	rc = bnxt_alloc_vf_resources(bp, *num_vfs);
	if (rc)
		goto err_out1;

	/* Reserve resources for VFs */
	rc = bnxt_hwrm_func_cfg(bp, *num_vfs);
	if (rc)
		goto err_out2;

	/* Register buffers for VFs */
	rc = bnxt_hwrm_func_buf_rgtr(bp);
	if (rc)
		goto err_out2;

	rc = pci_enable_sriov(bp->pdev, *num_vfs);
	if (rc)
		goto err_out2;

	return 0;

err_out2:
	/* Free the resources reserved for various VF's */
	bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);

err_out1:
	bnxt_free_vf_resources(bp);

	return rc;
}

void bnxt_sriov_disable(struct bnxt *bp)
{
	u16 num_vfs = pci_num_vf(bp->pdev);

	if (!num_vfs)
		return;

	if (pci_vfs_assigned(bp->pdev)) {
		netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n",
			    num_vfs);
	} else {
		pci_disable_sriov(bp->pdev);
		/* Free the HW resources reserved for various VF's */
		bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
	}

	bnxt_free_vf_resources(bp);

	bp->pf.active_vfs = 0;
	/* Reclaim all resources for the PF. */
	bnxt_hwrm_func_qcaps(bp);
}

int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);

	if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
		netdev_warn(dev, "Not allowing SRIOV if the irq mode is not MSIX\n");
		return 0;
	}

	rtnl_lock();
	if (!netif_running(dev)) {
		netdev_warn(dev, "Rejecting SRIOV config request since the interface is down!\n");
		rtnl_unlock();
		return 0;
	}
	bp->sriov_cfg = true;
	rtnl_unlock();

	if (pci_vfs_assigned(bp->pdev)) {
		netdev_warn(dev, "Unable to configure SRIOV since some VFs are assigned to VMs.\n");
		num_vfs = 0;
		goto sriov_cfg_exit;
	}

	/* Check if enabled VFs is same as requested */
	if (num_vfs && num_vfs == bp->pf.active_vfs)
		goto sriov_cfg_exit;

	/* if there are previous existing VFs, clean them up */
	bnxt_sriov_disable(bp);
	if (!num_vfs)
		goto sriov_cfg_exit;

	bnxt_sriov_enable(bp, &num_vfs);

sriov_cfg_exit:
	bp->sriov_cfg = false;
	wake_up(&bp->sriov_cfg_wait);

	return num_vfs;
}

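/* Forward a response that the PF has prepared on behalf of a VF back
 * to the VF's completion ring via HWRM_FWD_RESP.
 */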
static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
			      void *encap_resp, __le64 encap_resp_addr,
			      __le16 encap_resp_cpr, u32 msg_size)
{
	int rc = 0;
	struct hwrm_fwd_resp_input req = {0};
	struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);

	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_len = cpu_to_le16(msg_size);
	req.encap_resp_addr = encap_resp_addr;
	req.encap_resp_cmpl_ring = encap_resp_cpr;
	memcpy(req.encap_resp, encap_resp, msg_size);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

	if (rc) {
		netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
		goto fwd_resp_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_fwd_resp error %d\n",
			   resp->error_code);
		rc = -1;
	}

fwd_resp_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
				  u32 msg_size)
{
	int rc = 0;
	struct hwrm_reject_fwd_resp_input req = {0};
	struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

	if (rc) {
		netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
		goto fwd_err_resp_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_fwd_err_resp error %d\n",
			   resp->error_code);
		rc = -1;
	}

fwd_err_resp_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
				   u32 msg_size)
{
	int rc = 0;
	struct hwrm_exec_fwd_resp_input req = {0};
	struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);
	/* Set the new target id */
	req.target_id = cpu_to_le16(vf->fw_fid);
	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);

	if (rc) {
		netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc);
		goto exec_fwd_resp_exit;
	}

	if (resp->error_code) {
		netdev_err(bp->dev, "hwrm_exec_fw_resp error %d\n",
			   resp->error_code);
		rc = -1;
	}

exec_fwd_resp_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
	struct hwrm_cfa_l2_filter_alloc_input *req =
		(struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;

	/* Execute the filter request only if the VF has no PF-assigned MAC
	 * or is requesting its assigned MAC; otherwise reject it.
	 */
	if (!is_valid_ether_addr(vf->mac_addr) ||
	    ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
		return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);

	return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
}

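/* Handle a forwarded HWRM_PORT_PHY_QCFG from a VF.  If the VF link
 * state is forced, synthesize a PHY query response instead of letting
 * the firmware report the real link state.
 */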
static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	int rc = 0;

	if (!(vf->flags & BNXT_VF_LINK_FORCED)) {
		/* real link */
		rc = bnxt_hwrm_exec_fwd_resp(
			bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
	} else {
		struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
		struct hwrm_port_phy_qcfg_input *phy_qcfg_req;

		phy_qcfg_req =
		(struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr;
		mutex_lock(&bp->hwrm_cmd_lock);
		memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
		       sizeof(phy_qcfg_resp));
		mutex_unlock(&bp->hwrm_cmd_lock);
		phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;

		if (vf->flags & BNXT_VF_LINK_UP) {
			/* if physical link is down, force link up on VF */
			if (phy_qcfg_resp.link ==
			    PORT_PHY_QCFG_RESP_LINK_NO_LINK) {
				phy_qcfg_resp.link =
					PORT_PHY_QCFG_RESP_LINK_LINK;
				if (phy_qcfg_resp.auto_link_speed)
					phy_qcfg_resp.link_speed =
						phy_qcfg_resp.auto_link_speed;
				else
					phy_qcfg_resp.link_speed =
						phy_qcfg_resp.force_link_speed;
				phy_qcfg_resp.duplex =
					PORT_PHY_QCFG_RESP_DUPLEX_FULL;
				phy_qcfg_resp.pause =
					(PORT_PHY_QCFG_RESP_PAUSE_TX |
					 PORT_PHY_QCFG_RESP_PAUSE_RX);
			}
		} else {
			/* force link down */
			phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK;
			phy_qcfg_resp.link_speed = 0;
			phy_qcfg_resp.duplex = PORT_PHY_QCFG_RESP_DUPLEX_HALF;
			phy_qcfg_resp.pause = 0;
		}
		rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
					phy_qcfg_req->resp_addr,
					phy_qcfg_req->cmpl_ring,
					sizeof(phy_qcfg_resp));
	}
	return rc;
}

static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf)
{
	int rc = 0;
	struct hwrm_cmd_req_hdr *encap_req = vf->hwrm_cmd_req_addr;
	u32 req_type = le32_to_cpu(encap_req->cmpl_ring_req_type) & 0xffff;

	switch (req_type) {
	case HWRM_CFA_L2_FILTER_ALLOC:
		rc = bnxt_vf_validate_set_mac(bp, vf);
		break;
	case HWRM_FUNC_CFG:
		/* TODO: validate if VF is allowed to change mac address,
		 * mtu, num of rings etc
		 */
		rc = bnxt_hwrm_exec_fwd_resp(
			bp, vf, sizeof(struct hwrm_func_cfg_input));
		break;
	case HWRM_PORT_PHY_QCFG:
		rc = bnxt_vf_set_link(bp, vf);
		break;
	default:
		break;
	}
	return rc;
}

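/* Entry point from the PF's slow path when the firmware signals that
 * one or more VFs have forwarded commands pending.
 */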
void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
	u32 i = 0, active_vfs = bp->pf.active_vfs, vf_id;

	/* Scan through VF's and process commands */
	while (1) {
		vf_id = find_next_bit(bp->pf.vf_event_bmap, active_vfs, i);
		if (vf_id >= active_vfs)
			break;

		clear_bit(vf_id, bp->pf.vf_event_bmap);
		bnxt_vf_req_validate_snd(bp, &bp->pf.vf[vf_id]);
		i = vf_id + 1;
	}
}

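/* Runs on the VF: query the firmware for the PF-assigned MAC address
 * and, if one is set, adopt it as the netdev address.
 */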
void bnxt_update_vf_mac(struct bnxt *bp)
{
	struct hwrm_func_qcaps_input req = {0};
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
	req.fid = cpu_to_le16(0xffff);

	mutex_lock(&bp->hwrm_cmd_lock);
	if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
		goto update_vf_mac_exit;

	if (!is_valid_ether_addr(resp->perm_mac_address))
		goto update_vf_mac_exit;

	if (!ether_addr_equal(resp->perm_mac_address, bp->vf.mac_addr))
		memcpy(bp->vf.mac_addr, resp->perm_mac_address, ETH_ALEN);
	/* overwrite netdev dev_addr with admin VF MAC */
	memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
update_vf_mac_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
}

#else

void bnxt_sriov_disable(struct bnxt *bp)
{
}

void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
{
	netdev_err(bp->dev, "Invalid VF message received when SRIOV is not enabled\n");
}

void bnxt_update_vf_mac(struct bnxt *bp)
{
}
#endif