1 /* Broadcom NetXtreme-C/E network driver.
3 * Copyright (c) 2014-2016 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
*/
10 #include <linux/netdevice.h>
11 #include <linux/types.h>
12 #include <linux/errno.h>
13 #include <linux/rtnetlink.h>
14 #include <linux/interrupt.h>
15 #include <linux/pci.h>
16 #include <linux/etherdevice.h>
21 #ifdef CONFIG_BNXT_DCB
/*
 * Program the IEEE 802.1Qaz priority -> CoS queue mapping into firmware
 * via the HWRM_QUEUE_PRI2COS_CFG message.  Each of the 8 priorities is
 * mapped to the hardware queue id of the TC chosen in ets->prio_tc[].
 * NOTE(review): this listing is partial -- local declarations (rc, i,
 * pri2cos) and the closing return are not visible in this view.
 */
22 static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp, struct ieee_ets *ets)
24 struct hwrm_queue_pri2cos_cfg_input req = {0};
28 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_CFG, -1, -1);
/* Apply the mapping in both TX and RX directions, inner-VLAN based. */
29 req.flags = cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR |
30 QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN);
/* pri0..pri7 fields are contiguous in the request; index as an array. */
32 pri2cos = &req.pri0_cos_queue_id;
33 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
/* Enable bit i marks the pri(i) field of the request as valid. */
34 req.enables |= cpu_to_le32(
35 QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID << i);
37 pri2cos[i] = bp->q_info[ets->prio_tc[i]].queue_id;
39 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
/*
 * Query the current priority -> CoS queue mapping from firmware
 * (HWRM_QUEUE_PRI2COS_QCFG) and translate each returned hardware queue
 * id back to a TC index by searching bp->q_info[], presumably filling
 * ets->prio_tc[] -- TODO confirm; the inner-loop body is not visible.
 * NOTE(review): partial listing -- rc/i/j declarations, error handling
 * after hwrm_send_message(), and closing braces are missing from view.
 */
43 static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
45 struct hwrm_queue_pri2cos_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
46 struct hwrm_queue_pri2cos_qcfg_input req = {0};
49 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
50 req.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
51 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
/* Response pri0..pri7 fields are contiguous; walk them as an array. */
53 u8 *pri2cos = &resp->pri0_cos_queue_id;
56 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
57 u8 queue_id = pri2cos[i];
/* Reverse-map hardware queue id to driver TC index. */
59 for (j = 0; j < bp->max_tc; j++) {
60 if (bp->q_info[j].queue_id == queue_id) {
/*
 * Program per-TC scheduling (strict priority vs. ETS) and ETS bandwidth
 * weights into firmware via HWRM_QUEUE_COS2BW_CFG.  A bnxt_cos2bw_cfg
 * staging struct is packed per TC and copied into the request at a
 * cursor ("data") that advances sizeof(cos2bw) - 4 bytes per TC --
 * presumably skipping a leading field of the staging struct; the
 * initialization of "data" is not visible here.  TODO confirm.
 * NOTE(review): partial listing -- the max_tc parameter line, else
 * branch lhs, and return are missing from view.
 */
70 static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
73 struct hwrm_queue_cos2bw_cfg_input req = {0};
74 struct bnxt_cos2bw_cfg cos2bw;
78 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1);
80 for (i = 0; i < max_tc; i++, data += sizeof(cos2bw) - 4) {
/* Enable bit i marks the queue_id(i) group of the request as valid. */
81 req.enables |= cpu_to_le32(
82 QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID << i);
84 memset(&cos2bw, 0, sizeof(cos2bw));
85 cos2bw.queue_id = bp->q_info[i].queue_id;
/* Strict-priority TCs get TSA_ASSIGN_SP; all others ETS + weight. */
86 if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_STRICT) {
88 QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP;
92 QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS;
93 cos2bw.bw_weight = ets->tc_tx_bw[i];
95 memcpy(data, &cos2bw.queue_id, sizeof(cos2bw) - 4);
/* First TC's queue id also goes into the dedicated queue_id0 field. */
97 req.queue_id0 = cos2bw.queue_id;
101 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
/*
 * Query per-TC scheduling config from firmware (HWRM_QUEUE_COS2BW_QCFG)
 * and fill ets->tc_tsa[] / ets->tc_tx_bw[] for each TC whose hardware
 * queue id matches an entry in bp->q_info[].
 * NOTE(review): partial listing -- rc/i/j/data declarations, error check
 * after hwrm_send_message(), loop-body guard around the i==0 special
 * case, and closing braces/return are missing from view.
 */
105 static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
107 struct hwrm_queue_cos2bw_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
108 struct hwrm_queue_cos2bw_qcfg_input req = {0};
109 struct bnxt_cos2bw_cfg cos2bw;
113 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_QCFG, -1, -1);
114 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
/* Mirror of the cfg-side packing: cursor over the per-TC groups. */
118 data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id);
119 for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) {
122 memcpy(&cos2bw.queue_id, data, sizeof(cos2bw) - 4);
/* Presumably only for i == 0: take queue id from the dedicated
 * queue_id0 response field -- the guard is not visible here. */
124 cos2bw.queue_id = resp->queue_id0;
126 for (j = 0; j < bp->max_tc; j++) {
127 if (bp->q_info[j].queue_id != cos2bw.queue_id)
130 QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP) {
131 ets->tc_tsa[j] = IEEE_8021QAZ_TSA_STRICT;
133 ets->tc_tsa[j] = IEEE_8021QAZ_TSA_ETS;
134 ets->tc_tx_bw[j] = cos2bw.bw_weight;
/*
 * Reprogram hardware queue service profiles (HWRM_QUEUE_CFG) so that
 * exactly the queues selected by lltc_mask end up lossless: first demote
 * every currently-lossless queue to lossy, then promote the requested
 * set.  The two-pass order avoids exceeding the lossless-queue limit
 * mid-transition -- presumably; TODO confirm against full source.
 * NOTE(review): partial listing -- rc/i declarations, the bodies of the
 * netif_running() branches (likely close/reopen of the NIC), error
 * handling of the hwrm_send_message() calls, and return are not visible.
 */
141 static int bnxt_hwrm_queue_cfg(struct bnxt *bp, unsigned int lltc_mask)
143 struct hwrm_queue_cfg_input req = {0};
146 if (netif_running(bp->dev))
149 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_CFG, -1, -1);
150 req.flags = cpu_to_le32(QUEUE_CFG_REQ_FLAGS_PATH_BIDIR);
151 req.enables = cpu_to_le32(QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE);
153 /* Configure lossless queues to lossy first */
154 req.service_profile = QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY;
155 for (i = 0; i < bp->max_tc; i++) {
156 if (BNXT_LLQ(bp->q_info[i].queue_profile)) {
157 req.queue_id = cpu_to_le32(bp->q_info[i].queue_id);
/* NOTE(review): return value of hwrm_send_message() is ignored here. */
158 hwrm_send_message(bp, &req, sizeof(req),
/* Cache the new profile in the driver's queue info. */
160 bp->q_info[i].queue_profile =
161 QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY;
165 /* Now configure desired queues to lossless */
166 req.service_profile = QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS;
167 for (i = 0; i < bp->max_tc; i++) {
168 if (lltc_mask & (1 << i)) {
169 req.queue_id = cpu_to_le32(bp->q_info[i].queue_id);
170 hwrm_send_message(bp, &req, sizeof(req),
172 bp->q_info[i].queue_profile =
173 QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS;
176 if (netif_running(bp->dev))
/*
 * Apply an IEEE PFC configuration: derive the priority mask and the set
 * of TCs that need lossless queues from pfc->pfc_en and the cached ETS
 * prio->TC mapping, send HWRM_QUEUE_PFCENABLE_CFG, then reconfigure
 * queue service profiles via bnxt_hwrm_queue_cfg() if any PFC-enabled TC
 * is not already lossless.
 * NOTE(review): partial listing -- rc declaration, the tc_mask update
 * and lltc_count increment inside the loops, the my_ets NULL check, and
 * several braces/returns are not visible in this view.
 */
182 static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc)
184 struct hwrm_queue_pfcenable_cfg_input req = {0};
185 struct ieee_ets *my_ets = bp->ieee_ets;
186 unsigned int tc_mask = 0, pri_mask = 0;
187 u8 i, pri, lltc_count = 0;
188 bool need_q_recfg = false;
/* Collect the priorities with PFC enabled that map to each TC i. */
194 for (i = 0; i < bp->max_tc; i++) {
195 for (pri = 0; pri < IEEE_8021QAZ_MAX_TCS; pri++) {
196 if ((pfc->pfc_en & (1 << pri)) &&
197 (my_ets->prio_tc[pri] == i)) {
198 pri_mask |= 1 << pri;
202 if (tc_mask & (1 << i))
/* More lossless TCs requested than hardware supports -> reject. */
205 if (lltc_count > bp->max_lltc)
208 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_CFG, -1, -1);
/* flags carries the per-priority PFC enable bitmap. */
209 req.flags = cpu_to_le32(pri_mask);
210 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
/* Any PFC TC whose queue is not yet lossless forces a queue reconfig. */
214 for (i = 0; i < bp->max_tc; i++) {
215 if (tc_mask & (1 << i)) {
216 if (!BNXT_LLQ(bp->q_info[i].queue_profile))
222 rc = bnxt_hwrm_queue_cfg(bp, tc_mask);
/*
 * Query the PFC enable bitmap from firmware (HWRM_QUEUE_PFCENABLE_QCFG)
 * and store it in pfc->pfc_en.
 * NOTE(review): partial listing -- rc/pri_mask declarations, the error
 * check after hwrm_send_message(), and the return are not visible.
 */
227 static int bnxt_hwrm_queue_pfc_qcfg(struct bnxt *bp, struct ieee_pfc *pfc)
229 struct hwrm_queue_pfcenable_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
230 struct hwrm_queue_pfcenable_qcfg_input req = {0};
234 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_QCFG, -1, -1);
235 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
239 pri_mask = le32_to_cpu(resp->flags);
240 pfc->pfc_en = pri_mask;
/*
 * Validate a user-supplied ETS configuration: every prio->TC mapping
 * must fit within the device's TC count, ETS bandwidth shares must not
 * exceed 100%, and *tc receives the highest referenced TC (presumably
 * plus one before return -- the tail of the function is not visible).
 * NOTE(review): partial listing -- i/max_tc declarations, the -EINVAL
 * returns, the switch default case, and closing braces are missing.
 */
244 static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc)
246 int total_ets_bw = 0;
250 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
/* NOTE(review): valid q_info[] indices look like 0..max_tc-1, so ">"
 * rather than ">=" may permit an off-by-one here -- confirm against the
 * meaning of bp->max_tc in the full source. */
251 if (ets->prio_tc[i] > bp->max_tc) {
252 netdev_err(bp->dev, "priority to TC mapping exceeds TC count %d\n",
256 if (ets->prio_tc[i] > max_tc)
257 max_tc = ets->prio_tc[i];
/* Reject TSA/bandwidth settings on TCs beyond the device limit. */
259 if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) && i > bp->max_tc)
262 switch (ets->tc_tsa[i]) {
263 case IEEE_8021QAZ_TSA_STRICT:
265 case IEEE_8021QAZ_TSA_ETS:
266 total_ets_bw += ets->tc_tx_bw[i];
/* ETS shares are percentages; the sum may not exceed 100. */
272 if (total_ets_bw > 100)
/*
 * dcbnl ieee_getets handler: report the cached ETS config (bp->ieee_ets)
 * to the stack.  If no cache exists and the firmware owns DCBX
 * (!DCB_CAP_DCBX_HOST), allocate one and populate it from firmware via
 * the cos2bw / pri2cos query helpers.
 * NOTE(review): partial listing -- rc declaration, the NULL-cache early
 * return, kzalloc failure check, error handling after the qcfg calls,
 * the assignment of my_ets into bp->ieee_ets, and the return are not
 * visible in this view.
 */
279 static int bnxt_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets)
281 struct bnxt *bp = netdev_priv(dev);
282 struct ieee_ets *my_ets = bp->ieee_ets;
284 ets->ets_cap = bp->max_tc;
/* Host-managed DCBX: nothing to query from firmware. */
289 if (bp->dcbx_cap & DCB_CAP_DCBX_HOST)
292 my_ets = kzalloc(sizeof(*my_ets), GFP_KERNEL);
295 rc = bnxt_hwrm_queue_cos2bw_qcfg(bp, my_ets);
298 rc = bnxt_hwrm_queue_pri2cos_qcfg(bp, my_ets);
/* Copy the cached config out to the caller-supplied struct. */
303 ets->cbs = my_ets->cbs;
304 memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
305 memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw));
306 memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
307 memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
/*
 * dcbnl ieee_setets handler: validate the requested ETS config, set up
 * the TX queue / TC mapping (bnxt_setup_mq_tc), program firmware via the
 * cos2bw and pri2cos cfg helpers, and cache the accepted config in
 * bp->ieee_ets.  Only honored when DCBX is IEEE-mode and host-managed.
 * NOTE(review): partial listing -- i/rc/max_tc declarations, the guard
 * that only allocates my_ets when the cache is NULL, kzalloc failure
 * check, error returns after each step, and the final return are not
 * visible in this view.
 */
311 static int bnxt_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
313 struct bnxt *bp = netdev_priv(dev);
314 struct ieee_ets *my_ets = bp->ieee_ets;
318 if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
319 !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
322 rc = bnxt_ets_validate(bp, ets, &max_tc);
325 my_ets = kzalloc(sizeof(*my_ets), GFP_KERNEL);
328 /* initialize PRI2TC mappings to invalid value */
329 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
330 my_ets->prio_tc[i] = IEEE_8021QAZ_MAX_TCS;
331 bp->ieee_ets = my_ets;
333 rc = bnxt_setup_mq_tc(dev, max_tc);
336 rc = bnxt_hwrm_queue_cos2bw_cfg(bp, ets, max_tc);
339 rc = bnxt_hwrm_queue_pri2cos_cfg(bp, ets);
/* All steps succeeded: remember the active config. */
342 memcpy(my_ets, ets, sizeof(*my_ets));
/*
 * dcbnl ieee_getpfc handler: report the cached PFC config (bp->ieee_pfc)
 * -- querying firmware first when it owns DCBX -- and fill the per-
 * priority PFC frame counters from the hardware port statistics block.
 * NOTE(review): partial listing -- rc/i/rx_off/tx_off declarations, the
 * NULL-cache early return, kzalloc failure check, error handling after
 * bnxt_hwrm_queue_pfc_qcfg(), and the return are not visible.
 */
347 static int bnxt_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
349 struct bnxt *bp = netdev_priv(dev);
350 __le64 *stats = (__le64 *)bp->hw_rx_port_stats;
351 struct ieee_pfc *my_pfc = bp->ieee_pfc;
355 pfc->pfc_cap = bp->max_lltc;
/* Host-managed DCBX: nothing to query from firmware. */
358 if (bp->dcbx_cap & DCB_CAP_DCBX_HOST)
361 my_pfc = kzalloc(sizeof(*my_pfc), GFP_KERNEL);
364 bp->ieee_pfc = my_pfc;
365 rc = bnxt_hwrm_queue_pfc_qcfg(bp, my_pfc);
370 pfc->pfc_en = my_pfc->pfc_en;
371 pfc->mbc = my_pfc->mbc;
372 pfc->delay = my_pfc->delay;
/* Per-priority PFC frame counters live at fixed offsets in the
 * hardware stats block; rx and tx cursors advance together. */
377 rx_off = BNXT_RX_STATS_OFFSET(rx_pfc_ena_frames_pri0);
378 tx_off = BNXT_TX_STATS_OFFSET(tx_pfc_ena_frames_pri0);
379 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++, rx_off++, tx_off++) {
380 pfc->requests[i] = le64_to_cpu(*(stats + tx_off));
381 pfc->indications[i] = le64_to_cpu(*(stats + rx_off));
/*
 * dcbnl ieee_setpfc handler: program the requested PFC config into
 * firmware via bnxt_hwrm_queue_pfc_cfg() and cache it in bp->ieee_pfc.
 * Only honored when DCBX is IEEE-mode and host-managed.
 * NOTE(review): partial listing -- rc declaration, the guard that only
 * allocates when the cache is NULL, kzalloc failure check, error check
 * before the memcpy, and the return are not visible.
 */
387 static int bnxt_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
389 struct bnxt *bp = netdev_priv(dev);
390 struct ieee_pfc *my_pfc = bp->ieee_pfc;
393 if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
394 !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
398 my_pfc = kzalloc(sizeof(*my_pfc), GFP_KERNEL);
401 bp->ieee_pfc = my_pfc;
403 rc = bnxt_hwrm_queue_pfc_cfg(bp, pfc);
405 memcpy(my_pfc, pfc, sizeof(*my_pfc));
/*
 * dcbnl ieee_setapp handler: register an APP TLV entry via the core
 * dcb_ieee_setapp() helper.  Only honored when DCBX is IEEE-mode and
 * host-managed.
 * NOTE(review): partial listing -- rc declaration, the rejection return,
 * and the final return are not visible in this view.
 */
410 static int bnxt_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
412 struct bnxt *bp = netdev_priv(dev);
415 if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
416 !(bp->dcbx_cap & DCB_CAP_DCBX_HOST))
419 rc = dcb_ieee_setapp(dev, app);
/*
 * dcbnl ieee_delapp handler: remove an APP TLV entry via the core
 * dcb_ieee_delapp() helper.  Rejected unless DCBX is in IEEE mode.
 * NOTE(review): partial listing -- rc declaration, the rejection return,
 * and the final return are not visible in this view.
 */
423 static int bnxt_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
425 struct bnxt *bp = netdev_priv(dev);
428 if (!(bp->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
431 rc = dcb_ieee_delapp(dev, app);
/*
 * dcbnl getdcbx handler -- presumably returns bp->dcbx_cap; the function
 * body is not visible in this partial listing.  TODO confirm.
 */
435 static u8 bnxt_dcbnl_getdcbx(struct net_device *dev)
437 struct bnxt *bp = netdev_priv(dev);
/*
 * dcbnl setdcbx handler: accept a new DCBX capability mode.  Rejects CEE
 * or non-IEEE modes, and host-managed mode on a VF; a no-op when the
 * mode is unchanged.
 * NOTE(review): partial listing -- the per-check return values, the
 * assignment of mode into bp->dcbx_cap, and the final return are not
 * visible in this view (dcbnl setdcbx returns nonzero on failure).
 */
442 static u8 bnxt_dcbnl_setdcbx(struct net_device *dev, u8 mode)
444 struct bnxt *bp = netdev_priv(dev);
446 /* only support IEEE */
447 if ((mode & DCB_CAP_DCBX_VER_CEE) || !(mode & DCB_CAP_DCBX_VER_IEEE))
/* VFs cannot take host ownership of DCBX. */
450 if ((mode & DCB_CAP_DCBX_HOST) && BNXT_VF(bp))
453 if (mode == bp->dcbx_cap)
/*
 * dcbnl callback table registered on bp->dev in bnxt_dcb_init(); wires
 * the IEEE 802.1Qaz ETS/PFC/APP handlers above into the DCB netlink
 * interface.  NOTE(review): the closing "};" falls outside this view.
 */
460 static const struct dcbnl_rtnl_ops dcbnl_ops = {
461 .ieee_getets = bnxt_dcbnl_ieee_getets,
462 .ieee_setets = bnxt_dcbnl_ieee_setets,
463 .ieee_getpfc = bnxt_dcbnl_ieee_getpfc,
464 .ieee_setpfc = bnxt_dcbnl_ieee_setpfc,
465 .ieee_setapp = bnxt_dcbnl_ieee_setapp,
466 .ieee_delapp = bnxt_dcbnl_ieee_delapp,
467 .getdcbx = bnxt_dcbnl_getdcbx,
468 .setdcbx = bnxt_dcbnl_setdcbx,
/*
 * Enable DCB support on the device: requires firmware HWRM spec 1.5.1+.
 * DCBX defaults to IEEE mode; a PF without a firmware LLDP agent gets
 * host-managed DCBX, otherwise (per the visible |= on the next branch)
 * it is marked LLD-managed.  Finally the dcbnl ops table is registered.
 * NOTE(review): partial listing -- the early return for old firmware and
 * the else branch structure around the LLD_MANAGED line are not visible.
 */
471 void bnxt_dcb_init(struct bnxt *bp)
473 if (bp->hwrm_spec_code < 0x10501)
476 bp->dcbx_cap = DCB_CAP_DCBX_VER_IEEE;
477 if (BNXT_PF(bp) && !(bp->flags & BNXT_FLAG_FW_LLDP_AGENT))
478 bp->dcbx_cap |= DCB_CAP_DCBX_HOST;
480 bp->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED;
481 bp->dev->dcbnl_ops = &dcbnl_ops;
484 void bnxt_dcb_free(struct bnxt *bp)
494 void bnxt_dcb_init(struct bnxt *bp)
498 void bnxt_dcb_free(struct bnxt *bp)