/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"

#define BNXT_TX_TIMEOUT         (5 * HZ)

static const char version[] =
        "Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
MODULE_VERSION(DRV_MODULE_VERSION);

#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
#define BNXT_RX_COPY_THRESH 256

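/* Packets no longer than this can use the low-latency "push" mode in
 * bnxt_start_xmit(), where the TX BDs and the packet data itself are
 * written straight into the doorbell window instead of being fetched
 * from host memory by DMA.
 */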
#define BNXT_TX_PUSH_THRESH 164

enum board_idx {
        BCM57301,
        BCM57302,
        BCM57304,
        BCM57417_NPAR,
        BCM58700,
        BCM57311,
        BCM57312,
        BCM57402,
        BCM57404,
        BCM57406,
        BCM57402_NPAR,
        BCM57407,
        BCM57412,
        BCM57414,
        BCM57416,
        BCM57417,
        BCM57412_NPAR,
        BCM57314,
        BCM57417_SFP,
        BCM57416_SFP,
        BCM57404_NPAR,
        BCM57406_NPAR,
        BCM57407_SFP,
        BCM57407_NPAR,
        BCM57414_NPAR,
        BCM57416_NPAR,
        BCM57452,
        BCM57454,
        NETXTREME_E_VF,
        NETXTREME_C_VF,
};

/* indexed by enum above */
static const struct {
        char *name;
} board_info[] = {
        { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
        { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
        { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
        { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
        { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
        { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
        { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
        { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
        { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
        { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
        { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
        { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
        { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
        { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
        { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
        { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
        { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
        { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
        { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
        { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
        { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
        { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
        { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
        { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
        { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
        { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
        { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
        { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
        { "Broadcom NetXtreme-E Ethernet Virtual Function" },
        { "Broadcom NetXtreme-C Ethernet Virtual Function" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
        { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
        { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
        { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
        { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
        { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
        { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
        { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
        { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
        { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
        { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
        { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
        { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
        { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
        { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
        { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
        { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
        { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
        { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
        { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
        { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
        { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
#ifdef CONFIG_BNXT_SRIOV
        { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
        { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
        { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
        { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
#endif
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
        HWRM_FUNC_CFG,
        HWRM_PORT_PHY_QCFG,
        HWRM_CFA_L2_FILTER_ALLOC,
};

static const u16 bnxt_async_events_arr[] = {
        ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
        ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
        ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
        ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
};

static bool bnxt_vf_pciid(enum board_idx idx)
{
        return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF);
}

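/* Completion ring doorbell writes: DB_IDX_VALID marks the consumer
 * index carried in the write as current, and DB_IRQ_DIS keeps the
 * ring's interrupt masked (used while NAPI polling is in progress).
 */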
#define DB_CP_REARM_FLAGS       (DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS             (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS     (DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_REARM(db, raw_cons)                                  \
                writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db)

#define BNXT_CP_DB(db, raw_cons)                                        \
                writel(DB_CP_FLAGS | RING_CMP(raw_cons), db)

#define BNXT_CP_DB_IRQ_DIS(db)                                          \
                writel(DB_CP_IRQ_DIS_FLAGS, db)

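/* TX length hints, indexed by packet length in 512-byte units (see the
 * "length >>= 9" lookup in bnxt_start_xmit() below); presumably the
 * hint lets the chip size its DMA read requests for the packet.
 */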
const u16 bnxt_lhint_arr[] = {
        TX_BD_FLAGS_LHINT_512_AND_SMALLER,
        TX_BD_FLAGS_LHINT_512_TO_1023,
        TX_BD_FLAGS_LHINT_1024_TO_2047,
        TX_BD_FLAGS_LHINT_1024_TO_2047,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
        TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bnxt *bp = netdev_priv(dev);
        struct tx_bd *txbd;
        struct tx_bd_ext *txbd1;
        struct netdev_queue *txq;
        int i;
        dma_addr_t mapping;
        unsigned int length, pad = 0;
        u32 len, free_size, vlan_tag_flags, cfa_action, flags;
        u16 prod, last_frag;
        struct pci_dev *pdev = bp->pdev;
        struct bnxt_tx_ring_info *txr;
        struct bnxt_sw_tx_bd *tx_buf;

        i = skb_get_queue_mapping(skb);
        if (unlikely(i >= bp->tx_nr_rings)) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        txq = netdev_get_tx_queue(dev, i);
        txr = &bp->tx_ring[bp->tx_ring_map[i]];
        prod = txr->tx_prod;

        free_size = bnxt_tx_avail(bp, txr);
        if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
                netif_tx_stop_queue(txq);
                return NETDEV_TX_BUSY;
        }

        length = skb->len;
        len = skb_headlen(skb);
        last_frag = skb_shinfo(skb)->nr_frags;

        txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

        txbd->tx_bd_opaque = prod;

        tx_buf = &txr->tx_buf_ring[prod];
        tx_buf->skb = skb;
        tx_buf->nr_frags = last_frag;

        vlan_tag_flags = 0;
        cfa_action = 0;
        if (skb_vlan_tag_present(skb)) {
                vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
                                 skb_vlan_tag_get(skb);
                /* Currently supports 802.1Q and 802.1ad VLAN offloads;
                 * QINQ1, QINQ2 and QINQ3 VLAN headers are deprecated.
                 */
                if (skb->vlan_proto == htons(ETH_P_8021Q))
                        vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
        }

        if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
                struct tx_push_buffer *tx_push_buf = txr->tx_push;
                struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
                struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
                void *pdata = tx_push_buf->data;
                u64 *end;
                int j, push_len;

                /* Set COAL_NOW to be ready quickly for the next push */
                tx_push->tx_bd_len_flags_type =
                        cpu_to_le32((length << TX_BD_LEN_SHIFT) |
                                        TX_BD_TYPE_LONG_TX_BD |
                                        TX_BD_FLAGS_LHINT_512_AND_SMALLER |
                                        TX_BD_FLAGS_COAL_NOW |
                                        TX_BD_FLAGS_PACKET_END |
                                        (2 << TX_BD_FLAGS_BD_CNT_SHIFT));

                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        tx_push1->tx_bd_hsize_lflags =
                                        cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
                else
                        tx_push1->tx_bd_hsize_lflags = 0;

                tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
                tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action);

                end = pdata + length;
                end = PTR_ALIGN(end, 8) - 1;
                *end = 0;

                skb_copy_from_linear_data(skb, pdata, len);
                pdata += len;
                for (j = 0; j < last_frag; j++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
                        void *fptr;

                        fptr = skb_frag_address_safe(frag);
                        if (!fptr)
                                goto normal_tx;

                        memcpy(pdata, fptr, skb_frag_size(frag));
                        pdata += skb_frag_size(frag);
                }

                txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
                txbd->tx_bd_haddr = txr->data_mapping;
                prod = NEXT_TX(prod);
                txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
                memcpy(txbd, tx_push1, sizeof(*txbd));
                prod = NEXT_TX(prod);
                tx_push->doorbell =
                        cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
                txr->tx_prod = prod;

                tx_buf->is_push = 1;
                netdev_tx_sent_queue(txq, skb->len);
                wmb();  /* Sync is_push and byte queue before pushing data */

                push_len = (length + sizeof(*tx_push) + 7) / 8;
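                /* push_len counts 64-bit words (BDs plus padded payload).
                 * Longer pushes are split: the first 16 words go as 64-bit
                 * copies and the rest as 32-bit copies; the << 1 converts
                 * the remaining 64-bit word count into 32-bit words for
                 * __iowrite32_copy().
                 */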
                if (push_len > 16) {
                        __iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16);
                        __iowrite32_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
                                         (push_len - 16) << 1);
                } else {
                        __iowrite64_copy(txr->tx_doorbell, tx_push_buf,
                                         push_len);
                }

                goto tx_done;
        }

normal_tx:
        if (length < BNXT_MIN_PKT_SIZE) {
                pad = BNXT_MIN_PKT_SIZE - length;
                if (skb_pad(skb, pad)) {
                        /* SKB already freed. */
                        tx_buf->skb = NULL;
                        return NETDEV_TX_OK;
                }
                length = BNXT_MIN_PKT_SIZE;
        }

        mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

        if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
                dev_kfree_skb_any(skb);
                tx_buf->skb = NULL;
                return NETDEV_TX_OK;
        }

        dma_unmap_addr_set(tx_buf, mapping, mapping);
        flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
                ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

        txbd->tx_bd_haddr = cpu_to_le64(mapping);

        prod = NEXT_TX(prod);
        txbd1 = (struct tx_bd_ext *)
                &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

        txbd1->tx_bd_hsize_lflags = 0;
        if (skb_is_gso(skb)) {
                u32 hdr_len;

                if (skb->encapsulation)
                        hdr_len = skb_inner_network_offset(skb) +
                                skb_inner_network_header_len(skb) +
                                inner_tcp_hdrlen(skb);
                else
                        hdr_len = skb_transport_offset(skb) +
                                tcp_hdrlen(skb);

                txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
                                        TX_BD_FLAGS_T_IPID |
                                        (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
                length = skb_shinfo(skb)->gso_size;
                txbd1->tx_bd_mss = cpu_to_le32(length);
                length += hdr_len;
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                txbd1->tx_bd_hsize_lflags =
                        cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
                txbd1->tx_bd_mss = 0;
        }

        length >>= 9;
        flags |= bnxt_lhint_arr[length];
        txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

        txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
        txbd1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
        for (i = 0; i < last_frag; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                prod = NEXT_TX(prod);
                txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

                len = skb_frag_size(frag);
                mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
                                           DMA_TO_DEVICE);

                if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
                        goto tx_dma_error;

                tx_buf = &txr->tx_buf_ring[prod];
                dma_unmap_addr_set(tx_buf, mapping, mapping);

                txbd->tx_bd_haddr = cpu_to_le64(mapping);

                flags = len << TX_BD_LEN_SHIFT;
                txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
        }

        flags &= ~TX_BD_LEN;
        txbd->tx_bd_len_flags_type =
                cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
                            TX_BD_FLAGS_PACKET_END);

        netdev_tx_sent_queue(txq, skb->len);

        /* Sync BD data before updating doorbell */
        wmb();

        prod = NEXT_TX(prod);
        txr->tx_prod = prod;

        if (!skb->xmit_more || netif_xmit_stopped(txq))
                bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | prod);

tx_done:

        mmiowb();

        if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
                if (skb->xmit_more && !tx_buf->is_push)
                        bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | prod);

                netif_tx_stop_queue(txq);

                /* netif_tx_stop_queue() must be done before checking
                 * tx index in bnxt_tx_avail() below, because in
                 * bnxt_tx_int(), we update tx index before checking for
                 * netif_tx_queue_stopped().
                 */
                smp_mb();
                if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
                        netif_tx_wake_queue(txq);
        }
        return NETDEV_TX_OK;

tx_dma_error:
        last_frag = i;

        /* start back at beginning and unmap skb */
        prod = txr->tx_prod;
        tx_buf = &txr->tx_buf_ring[prod];
        tx_buf->skb = NULL;
        dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                         skb_headlen(skb), PCI_DMA_TODEVICE);
        prod = NEXT_TX(prod);

        /* unmap remaining mapped pages */
        for (i = 0; i < last_frag; i++) {
                prod = NEXT_TX(prod);
                tx_buf = &txr->tx_buf_ring[prod];
                dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                               skb_frag_size(&skb_shinfo(skb)->frags[i]),
                               PCI_DMA_TODEVICE);
        }

        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
}

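/* Reclaim up to @nr_pkts completed TX packets: unmap their buffers,
 * free the skbs, credit the BQL queue, and re-wake the TX queue if it
 * was stopped and enough descriptors are free again.
 */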
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
        struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
        struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
        u16 cons = txr->tx_cons;
        struct pci_dev *pdev = bp->pdev;
        int i;
        unsigned int tx_bytes = 0;

        for (i = 0; i < nr_pkts; i++) {
                struct bnxt_sw_tx_bd *tx_buf;
                struct sk_buff *skb;
                int j, last;

                tx_buf = &txr->tx_buf_ring[cons];
                cons = NEXT_TX(cons);
                skb = tx_buf->skb;
                tx_buf->skb = NULL;

                if (tx_buf->is_push) {
                        tx_buf->is_push = 0;
                        goto next_tx_int;
                }

                dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
                                 skb_headlen(skb), PCI_DMA_TODEVICE);
                last = tx_buf->nr_frags;

                for (j = 0; j < last; j++) {
                        cons = NEXT_TX(cons);
                        tx_buf = &txr->tx_buf_ring[cons];
                        dma_unmap_page(
                                &pdev->dev,
                                dma_unmap_addr(tx_buf, mapping),
                                skb_frag_size(&skb_shinfo(skb)->frags[j]),
                                PCI_DMA_TODEVICE);
                }

next_tx_int:
                cons = NEXT_TX(cons);

                tx_bytes += skb->len;
                dev_kfree_skb_any(skb);
        }

        netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
        txr->tx_cons = cons;

        /* Need to make the tx_cons update visible to bnxt_start_xmit()
         * before checking for netif_tx_queue_stopped().  Without the
         * memory barrier, there is a small possibility that bnxt_start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        if (unlikely(netif_tx_queue_stopped(txq)) &&
            (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
                __netif_tx_lock(txq, smp_processor_id());
                if (netif_tx_queue_stopped(txq) &&
                    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
                    txr->dev_state != BNXT_DEV_STATE_CLOSING)
                        netif_tx_wake_queue(txq);
                __netif_tx_unlock(txq);
        }
}

static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
                                         gfp_t gfp)
{
        struct device *dev = &bp->pdev->dev;
        struct page *page;

        page = alloc_page(gfp);
        if (!page)
                return NULL;

        *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
                                      DMA_ATTR_WEAK_ORDERING);
        if (dma_mapping_error(dev, *mapping)) {
                __free_page(page);
                return NULL;
        }
        *mapping += bp->rx_dma_offset;
        return page;
}

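/* Normal (non page-mode) RX buffers are kmalloc'ed so that the
 * completion path can hand them to build_skb() without copying; only
 * the region past rx_dma_offset is mapped for the device.
 */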
static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
                                       gfp_t gfp)
{
        u8 *data;
        struct pci_dev *pdev = bp->pdev;

        data = kmalloc(bp->rx_buf_size, gfp);
        if (!data)
                return NULL;

        *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
                                        bp->rx_buf_use_size, bp->rx_dir,
                                        DMA_ATTR_WEAK_ORDERING);

        if (dma_mapping_error(&pdev->dev, *mapping)) {
                kfree(data);
                data = NULL;
        }
        return data;
}

int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
                       u16 prod, gfp_t gfp)
{
        struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
        dma_addr_t mapping;

        if (BNXT_RX_PAGE_MODE(bp)) {
                struct page *page = __bnxt_alloc_rx_page(bp, &mapping, gfp);

                if (!page)
                        return -ENOMEM;

                rx_buf->data = page;
                rx_buf->data_ptr = page_address(page) + bp->rx_offset;
        } else {
                u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);

                if (!data)
                        return -ENOMEM;

                rx_buf->data = data;
                rx_buf->data_ptr = data + bp->rx_offset;
        }
        rx_buf->mapping = mapping;

        rxbd->rx_bd_haddr = cpu_to_le64(mapping);
        return 0;
}

void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{
        u16 prod = rxr->rx_prod;
        struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
        struct rx_bd *cons_bd, *prod_bd;

        prod_rx_buf = &rxr->rx_buf_ring[prod];
        cons_rx_buf = &rxr->rx_buf_ring[cons];

        prod_rx_buf->data = data;
        prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;

        prod_rx_buf->mapping = cons_rx_buf->mapping;

        prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];

        prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}

static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
        u16 next, max = rxr->rx_agg_bmap_size;

        next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
        if (next >= max)
                next = find_first_zero_bit(rxr->rx_agg_bmap, max);
        return next;
}

static inline int bnxt_alloc_rx_page(struct bnxt *bp,
                                     struct bnxt_rx_ring_info *rxr,
                                     u16 prod, gfp_t gfp)
{
        struct rx_bd *rxbd =
                &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        struct bnxt_sw_rx_agg_bd *rx_agg_buf;
        struct pci_dev *pdev = bp->pdev;
        struct page *page;
        dma_addr_t mapping;
        u16 sw_prod = rxr->rx_sw_agg_prod;
        unsigned int offset = 0;

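        /* When PAGE_SIZE exceeds BNXT_RX_PAGE_SIZE, one page is carved
         * into several aggregation buffers; get_page() takes an extra
         * reference for each slice that remains outstanding.
         */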
        if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
                page = rxr->rx_page;
                if (!page) {
                        page = alloc_page(gfp);
                        if (!page)
                                return -ENOMEM;
                        rxr->rx_page = page;
                        rxr->rx_page_offset = 0;
                }
                offset = rxr->rx_page_offset;
                rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
                if (rxr->rx_page_offset == PAGE_SIZE)
                        rxr->rx_page = NULL;
                else
                        get_page(page);
        } else {
                page = alloc_page(gfp);
                if (!page)
                        return -ENOMEM;
        }

        mapping = dma_map_page_attrs(&pdev->dev, page, offset,
                                     BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
                                     DMA_ATTR_WEAK_ORDERING);
        if (dma_mapping_error(&pdev->dev, mapping)) {
                __free_page(page);
                return -EIO;
        }

        if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
                sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

        __set_bit(sw_prod, rxr->rx_agg_bmap);
        rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
        rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);

        rx_agg_buf->page = page;
        rx_agg_buf->offset = offset;
        rx_agg_buf->mapping = mapping;
        rxbd->rx_bd_haddr = cpu_to_le64(mapping);
        rxbd->rx_bd_opaque = sw_prod;
        return 0;
}

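/* Recycle @agg_bufs aggregation buffers named by the completion ring
 * entries starting at @cp_cons back onto the aggregation ring (used
 * when a packet is dropped or its skb could not be completed).
 */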
static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
                                   u32 agg_bufs)
{
        struct bnxt *bp = bnapi->bp;
        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
        u16 prod = rxr->rx_agg_prod;
        u16 sw_prod = rxr->rx_sw_agg_prod;
        u32 i;

        for (i = 0; i < agg_bufs; i++) {
                u16 cons;
                struct rx_agg_cmp *agg;
                struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
                struct rx_bd *prod_bd;
                struct page *page;

                agg = (struct rx_agg_cmp *)
                        &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
                cons = agg->rx_agg_cmp_opaque;
                __clear_bit(cons, rxr->rx_agg_bmap);

                if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
                        sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

                __set_bit(sw_prod, rxr->rx_agg_bmap);
                prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
                cons_rx_buf = &rxr->rx_agg_ring[cons];

                /* It is possible for sw_prod to be equal to cons, so
                 * set cons_rx_buf->page to NULL first.
                 */
                page = cons_rx_buf->page;
                cons_rx_buf->page = NULL;
                prod_rx_buf->page = page;
                prod_rx_buf->offset = cons_rx_buf->offset;

                prod_rx_buf->mapping = cons_rx_buf->mapping;

                prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

                prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
                prod_bd->rx_bd_opaque = sw_prod;

                prod = NEXT_RX_AGG(prod);
                sw_prod = NEXT_RX_AGG(sw_prod);
                cp_cons = NEXT_CMP(cp_cons);
        }
        rxr->rx_agg_prod = prod;
        rxr->rx_sw_agg_prod = sw_prod;
}

static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
                                        struct bnxt_rx_ring_info *rxr,
                                        u16 cons, void *data, u8 *data_ptr,
                                        dma_addr_t dma_addr,
                                        unsigned int offset_and_len)
{
        unsigned int payload = offset_and_len >> 16;
        unsigned int len = offset_and_len & 0xffff;
        struct skb_frag_struct *frag;
        struct page *page = data;
        u16 prod = rxr->rx_prod;
        struct sk_buff *skb;
        int off, err;

        err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
        if (unlikely(err)) {
                bnxt_reuse_rx_data(rxr, cons, data);
                return NULL;
        }
        dma_addr -= bp->rx_dma_offset;
        dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
                             DMA_ATTR_WEAK_ORDERING);

        if (unlikely(!payload))
                payload = eth_get_headlen(data_ptr, len);

        skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
        if (!skb) {
                __free_page(page);
                return NULL;
        }

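        /* Attach the full page as frag 0, then pull the headers (the
         * "payload" bytes) into the linear area and shrink the frag so
         * the stack sees a conventional header/data split.
         */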
        off = (void *)data_ptr - page_address(page);
        skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
        memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
               payload + NET_IP_ALIGN);

        frag = &skb_shinfo(skb)->frags[0];
        skb_frag_size_sub(frag, payload);
        frag->page_offset += payload;
        skb->data_len -= payload;
        skb->tail += payload;

        return skb;
}

static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
                                   struct bnxt_rx_ring_info *rxr, u16 cons,
                                   void *data, u8 *data_ptr,
                                   dma_addr_t dma_addr,
                                   unsigned int offset_and_len)
{
        u16 prod = rxr->rx_prod;
        struct sk_buff *skb;
        int err;

        err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
        if (unlikely(err)) {
                bnxt_reuse_rx_data(rxr, cons, data);
                return NULL;
        }

        skb = build_skb(data, 0);
        dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
                               bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
        if (!skb) {
                kfree(data);
                return NULL;
        }

        skb_reserve(skb, bp->rx_offset);
        skb_put(skb, offset_and_len & 0xffff);
        return skb;
}

static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
                                     struct sk_buff *skb, u16 cp_cons,
                                     u32 agg_bufs)
{
        struct pci_dev *pdev = bp->pdev;
        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
        u16 prod = rxr->rx_agg_prod;
        u32 i;

        for (i = 0; i < agg_bufs; i++) {
                u16 cons, frag_len;
                struct rx_agg_cmp *agg;
                struct bnxt_sw_rx_agg_bd *cons_rx_buf;
                struct page *page;
                dma_addr_t mapping;

                agg = (struct rx_agg_cmp *)
                        &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
                cons = agg->rx_agg_cmp_opaque;
                frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
                            RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

                cons_rx_buf = &rxr->rx_agg_ring[cons];
                skb_fill_page_desc(skb, i, cons_rx_buf->page,
                                   cons_rx_buf->offset, frag_len);
                __clear_bit(cons, rxr->rx_agg_bmap);

                /* It is possible for bnxt_alloc_rx_page() to allocate
                 * a sw_prod index that equals the cons index, so we
                 * need to clear the cons entry now.
                 */
                mapping = cons_rx_buf->mapping;
                page = cons_rx_buf->page;
                cons_rx_buf->page = NULL;

                if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
                        struct skb_shared_info *shinfo;
                        unsigned int nr_frags;

                        shinfo = skb_shinfo(skb);
                        nr_frags = --shinfo->nr_frags;
                        __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);

                        dev_kfree_skb(skb);

                        cons_rx_buf->page = page;

                        /* Update prod since possibly some pages have been
                         * allocated already.
                         */
                        rxr->rx_agg_prod = prod;
                        bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i);
                        return NULL;
                }

                dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
                                     PCI_DMA_FROMDEVICE,
                                     DMA_ATTR_WEAK_ORDERING);

                skb->data_len += frag_len;
                skb->len += frag_len;
                skb->truesize += PAGE_SIZE;

                prod = NEXT_RX_AGG(prod);
                cp_cons = NEXT_CMP(cp_cons);
        }
        rxr->rx_agg_prod = prod;
        return skb;
}

static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                               u8 agg_bufs, u32 *raw_cons)
{
        u16 last;
        struct rx_agg_cmp *agg;

        *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
        last = RING_CMP(*raw_cons);
        agg = (struct rx_agg_cmp *)
                &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
        return RX_AGG_CMP_VALID(agg, *raw_cons);
}

static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
                                            unsigned int len,
                                            dma_addr_t mapping)
{
        struct bnxt *bp = bnapi->bp;
        struct pci_dev *pdev = bp->pdev;
        struct sk_buff *skb;

        skb = napi_alloc_skb(&bnapi->napi, len);
        if (!skb)
                return NULL;

        dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
                                bp->rx_dir);

        memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
               len + NET_IP_ALIGN);

        dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
                                   bp->rx_dir);

        skb_put(skb, len);
        return skb;
}

static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi,
                           u32 *raw_cons, void *cmp)
{
        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
        struct rx_cmp *rxcmp = cmp;
        u32 tmp_raw_cons = *raw_cons;
        u8 cmp_type, agg_bufs = 0;

        cmp_type = RX_CMP_TYPE(rxcmp);

        if (cmp_type == CMP_TYPE_RX_L2_CMP) {
                agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
                            RX_CMP_AGG_BUFS) >>
                           RX_CMP_AGG_BUFS_SHIFT;
        } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
                struct rx_tpa_end_cmp *tpa_end = cmp;

                agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
                            RX_TPA_END_CMP_AGG_BUFS) >>
                           RX_TPA_END_CMP_AGG_BUFS_SHIFT;
        }

        if (agg_bufs) {
                if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
                        return -EBUSY;
        }
        *raw_cons = tmp_raw_cons;
        return 0;
}

static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
        if (!rxr->bnapi->in_reset) {
                rxr->bnapi->in_reset = true;
                set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
                schedule_work(&bp->sp_task);
        }
        rxr->rx_next_cons = 0xffff;
}

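/* TPA (TCP packet aggregation) start: the chip has claimed the RX
 * buffer at the opaque cons index for an aggregation.  Park that
 * buffer in rx_tpa[agg_id] and recycle a fresh one into the producer
 * slot; the matching TPA_END completion is handled in bnxt_tpa_end().
 */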
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
                           struct rx_tpa_start_cmp *tpa_start,
                           struct rx_tpa_start_cmp_ext *tpa_start1)
{
        u8 agg_id = TPA_START_AGG_ID(tpa_start);
        u16 cons, prod;
        struct bnxt_tpa_info *tpa_info;
        struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
        struct rx_bd *prod_bd;
        dma_addr_t mapping;

        cons = tpa_start->rx_tpa_start_cmp_opaque;
        prod = rxr->rx_prod;
        cons_rx_buf = &rxr->rx_buf_ring[cons];
        prod_rx_buf = &rxr->rx_buf_ring[prod];
        tpa_info = &rxr->rx_tpa[agg_id];

        if (unlikely(cons != rxr->rx_next_cons)) {
                bnxt_sched_reset(bp, rxr);
                return;
        }

        prod_rx_buf->data = tpa_info->data;
        prod_rx_buf->data_ptr = tpa_info->data_ptr;

        mapping = tpa_info->mapping;
        prod_rx_buf->mapping = mapping;

        prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];

        prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

        tpa_info->data = cons_rx_buf->data;
        tpa_info->data_ptr = cons_rx_buf->data_ptr;
        cons_rx_buf->data = NULL;
        tpa_info->mapping = cons_rx_buf->mapping;

        tpa_info->len =
                le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
                                RX_TPA_START_CMP_LEN_SHIFT;
        if (likely(TPA_START_HASH_VALID(tpa_start))) {
                u32 hash_type = TPA_START_HASH_TYPE(tpa_start);

                tpa_info->hash_type = PKT_HASH_TYPE_L4;
                tpa_info->gso_type = SKB_GSO_TCPV4;
                /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
                if (hash_type == 3)
                        tpa_info->gso_type = SKB_GSO_TCPV6;
                tpa_info->rss_hash =
                        le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
        } else {
                tpa_info->hash_type = PKT_HASH_TYPE_NONE;
                tpa_info->gso_type = 0;
                if (netif_msg_rx_err(bp))
                        netdev_warn(bp->dev, "TPA packet without valid hash\n");
        }
        tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
        tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
        tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);

        rxr->rx_prod = NEXT_RX(prod);
        cons = NEXT_RX(cons);
        rxr->rx_next_cons = NEXT_RX(cons);
        cons_rx_buf = &rxr->rx_buf_ring[cons];

        bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
        rxr->rx_prod = NEXT_RX(rxr->rx_prod);
        cons_rx_buf->data = NULL;
}

static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi,
                           u16 cp_cons, u32 agg_bufs)
{
        if (agg_bufs)
                bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
}

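/* Per-chip GRO completion helpers.  The 5731x variant takes the header
 * offsets from the TPA hdr_info metadata, while the 5730x variant
 * reconstructs them from the payload offset.  Both rewrite th->check
 * to the pseudo-header checksum that tcp_gro_complete() expects.
 */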
static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
                                           int payload_off, int tcp_ts,
                                           struct sk_buff *skb)
{
#ifdef CONFIG_INET
        struct tcphdr *th;
        int len, nw_off;
        u16 outer_ip_off, inner_ip_off, inner_mac_off;
        u32 hdr_info = tpa_info->hdr_info;
        bool loopback = false;

        inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
        inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
        outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

        /* If the packet is an internal loopback packet, the offsets will
         * have an extra 4 bytes.
         */
        if (inner_mac_off == 4) {
                loopback = true;
        } else if (inner_mac_off > 4) {
                __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
                                            ETH_HLEN - 2));

                /* We only support inner IPv4/IPv6.  If we don't see the
                 * correct protocol ID, it must be a loopback packet where
                 * the offsets are off by 4.
                 */
                if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
                        loopback = true;
        }
        if (loopback) {
                /* internal loopback packet, subtract all offsets by 4 */
                inner_ip_off -= 4;
                inner_mac_off -= 4;
                outer_ip_off -= 4;
        }

        nw_off = inner_ip_off - ETH_HLEN;
        skb_set_network_header(skb, nw_off);
        if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
                struct ipv6hdr *iph = ipv6_hdr(skb);

                skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
                len = skb->len - skb_transport_offset(skb);
                th = tcp_hdr(skb);
                th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
        } else {
                struct iphdr *iph = ip_hdr(skb);

                skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
                len = skb->len - skb_transport_offset(skb);
                th = tcp_hdr(skb);
                th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
        }

        if (inner_mac_off) { /* tunnel */
                struct udphdr *uh = NULL;
                __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
                                            ETH_HLEN - 2));

                if (proto == htons(ETH_P_IP)) {
                        struct iphdr *iph = (struct iphdr *)skb->data;

                        if (iph->protocol == IPPROTO_UDP)
                                uh = (struct udphdr *)(iph + 1);
                } else {
                        struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

                        if (iph->nexthdr == IPPROTO_UDP)
                                uh = (struct udphdr *)(iph + 1);
                }
                if (uh) {
                        if (uh->check)
                                skb_shinfo(skb)->gso_type |=
                                        SKB_GSO_UDP_TUNNEL_CSUM;
                        else
                                skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
                }
        }
#endif
        return skb;
}

#define BNXT_IPV4_HDR_SIZE      (sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE      (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))

static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
                                           int payload_off, int tcp_ts,
                                           struct sk_buff *skb)
{
#ifdef CONFIG_INET
        struct tcphdr *th;
        int len, nw_off, tcp_opt_len = 0;

        if (tcp_ts)
                tcp_opt_len = 12;

        if (tpa_info->gso_type == SKB_GSO_TCPV4) {
                struct iphdr *iph;

                nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
                         ETH_HLEN;
                skb_set_network_header(skb, nw_off);
                iph = ip_hdr(skb);
                skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
                len = skb->len - skb_transport_offset(skb);
                th = tcp_hdr(skb);
                th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
        } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
                struct ipv6hdr *iph;

                nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
                         ETH_HLEN;
                skb_set_network_header(skb, nw_off);
                iph = ipv6_hdr(skb);
                skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
                len = skb->len - skb_transport_offset(skb);
                th = tcp_hdr(skb);
                th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
        } else {
                dev_kfree_skb_any(skb);
                return NULL;
        }

        if (nw_off) { /* tunnel */
                struct udphdr *uh = NULL;

                if (skb->protocol == htons(ETH_P_IP)) {
                        struct iphdr *iph = (struct iphdr *)skb->data;

                        if (iph->protocol == IPPROTO_UDP)
                                uh = (struct udphdr *)(iph + 1);
                } else {
                        struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

                        if (iph->nexthdr == IPPROTO_UDP)
                                uh = (struct udphdr *)(iph + 1);
                }
                if (uh) {
                        if (uh->check)
                                skb_shinfo(skb)->gso_type |=
                                        SKB_GSO_UDP_TUNNEL_CSUM;
                        else
                                skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
                }
        }
#endif
        return skb;
}

static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
                                           struct bnxt_tpa_info *tpa_info,
                                           struct rx_tpa_end_cmp *tpa_end,
                                           struct rx_tpa_end_cmp_ext *tpa_end1,
                                           struct sk_buff *skb)
{
#ifdef CONFIG_INET
        int payload_off;
        u16 segs;

        segs = TPA_END_TPA_SEGS(tpa_end);
        if (segs == 1)
                return skb;

        NAPI_GRO_CB(skb)->count = segs;
        skb_shinfo(skb)->gso_size =
                le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
        skb_shinfo(skb)->gso_type = tpa_info->gso_type;
        payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
                       RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
                      RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
        skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
        if (likely(skb))
                tcp_gro_complete(skb);
#endif
        return skb;
}

static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
                                           struct bnxt_napi *bnapi,
                                           u32 *raw_cons,
                                           struct rx_tpa_end_cmp *tpa_end,
                                           struct rx_tpa_end_cmp_ext *tpa_end1,
                                           u8 *event)
{
        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
        u8 agg_id = TPA_END_AGG_ID(tpa_end);
        u8 *data_ptr, agg_bufs;
        u16 cp_cons = RING_CMP(*raw_cons);
        unsigned int len;
        struct bnxt_tpa_info *tpa_info;
        dma_addr_t mapping;
        struct sk_buff *skb;
        void *data;

        if (unlikely(bnapi->in_reset)) {
                int rc = bnxt_discard_rx(bp, bnapi, raw_cons, tpa_end);

                if (rc < 0)
                        return ERR_PTR(-EBUSY);
                return NULL;
        }

        tpa_info = &rxr->rx_tpa[agg_id];
        data = tpa_info->data;
        data_ptr = tpa_info->data_ptr;
        prefetch(data_ptr);
        len = tpa_info->len;
        mapping = tpa_info->mapping;

        agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
                    RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;

        if (agg_bufs) {
                if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
                        return ERR_PTR(-EBUSY);

                *event |= BNXT_AGG_EVENT;
                cp_cons = NEXT_CMP(cp_cons);
        }

        if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
                bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
                if (agg_bufs > MAX_SKB_FRAGS)
                        netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
                                    agg_bufs, (int)MAX_SKB_FRAGS);
                return NULL;
        }

        if (len <= bp->rx_copy_thresh) {
                skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
                if (!skb) {
                        bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
                        return NULL;
                }
        } else {
                u8 *new_data;
                dma_addr_t new_mapping;

                new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
                if (!new_data) {
                        bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
                        return NULL;
                }

                tpa_info->data = new_data;
                tpa_info->data_ptr = new_data + bp->rx_offset;
                tpa_info->mapping = new_mapping;

                skb = build_skb(data, 0);
                dma_unmap_single_attrs(&bp->pdev->dev, mapping,
                                       bp->rx_buf_use_size, bp->rx_dir,
                                       DMA_ATTR_WEAK_ORDERING);

                if (!skb) {
                        kfree(data);
                        bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
                        return NULL;
                }
                skb_reserve(skb, bp->rx_offset);
                skb_put(skb, len);
        }

        if (agg_bufs) {
                skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
                if (!skb) {
                        /* Page reuse already handled by bnxt_rx_pages(). */
                        return NULL;
                }
        }
        skb->protocol = eth_type_trans(skb, bp->dev);

        if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
                skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);

        if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
            (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
                u16 vlan_proto = tpa_info->metadata >>
                        RX_CMP_FLAGS2_METADATA_TPID_SFT;
                u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK;

                __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
        }

        skb_checksum_none_assert(skb);
        if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                skb->csum_level =
                        (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
        }

        if (TPA_END_GRO(tpa_end))
                skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);

        return skb;
}

/* returns the following:
 * 1       - 1 packet successfully received
 * 0       - successful TPA_START, packet not completed yet
 * -EBUSY  - completion ring does not have all the agg buffers yet
 * -ENOMEM - packet aborted due to out of memory
 * -EIO    - packet aborted due to hw error indicated in BD
 */
static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
                       u8 *event)
{
        struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
        struct net_device *dev = bp->dev;
        struct rx_cmp *rxcmp;
        struct rx_cmp_ext *rxcmp1;
        u32 tmp_raw_cons = *raw_cons;
        u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
        struct bnxt_sw_rx_bd *rx_buf;
        unsigned int len;
        u8 *data_ptr, agg_bufs, cmp_type;
        dma_addr_t dma_addr;
        struct sk_buff *skb;
        void *data;
        int rc = 0;
        u32 misc;

        rxcmp = (struct rx_cmp *)
                        &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

        tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
        cp_cons = RING_CMP(tmp_raw_cons);
        rxcmp1 = (struct rx_cmp_ext *)
                        &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

        if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
                return -EBUSY;

        cmp_type = RX_CMP_TYPE(rxcmp);

        prod = rxr->rx_prod;

        if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
                bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
                               (struct rx_tpa_start_cmp_ext *)rxcmp1);

                *event |= BNXT_RX_EVENT;
                goto next_rx_no_prod;

        } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
                skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
                                   (struct rx_tpa_end_cmp *)rxcmp,
                                   (struct rx_tpa_end_cmp_ext *)rxcmp1, event);

                if (unlikely(IS_ERR(skb)))
                        return -EBUSY;

                rc = -ENOMEM;
                if (likely(skb)) {
                        skb_record_rx_queue(skb, bnapi->index);
                        napi_gro_receive(&bnapi->napi, skb);
                        rc = 1;
                }
                *event |= BNXT_RX_EVENT;
                goto next_rx_no_prod;
        }

        cons = rxcmp->rx_cmp_opaque;
        rx_buf = &rxr->rx_buf_ring[cons];
        data = rx_buf->data;
1459         data_ptr = rx_buf->data_ptr;
1460         if (unlikely(cons != rxr->rx_next_cons)) {
1461                 int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);
1462
1463                 bnxt_sched_reset(bp, rxr);
1464                 return rc1;
1465         }
1466         prefetch(data_ptr);
1467
1468         misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1469         agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
1470
1471         if (agg_bufs) {
1472                 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1473                         return -EBUSY;
1474
1475                 cp_cons = NEXT_CMP(cp_cons);
1476                 *event |= BNXT_AGG_EVENT;
1477         }
1478         *event |= BNXT_RX_EVENT;
1479
1480         rx_buf->data = NULL;
1481         if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1482                 bnxt_reuse_rx_data(rxr, cons, data);
1483                 if (agg_bufs)
1484                         bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
1485
1486                 rc = -EIO;
1487                 goto next_rx;
1488         }
1489
1490         len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
1491         dma_addr = rx_buf->mapping;
1492
1493         if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
1494                 rc = 1;
1495                 goto next_rx;
1496         }
1497
1498         if (len <= bp->rx_copy_thresh) {
1499                 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
1500                 bnxt_reuse_rx_data(rxr, cons, data);
1501                 if (!skb) {
1502                         rc = -ENOMEM;
1503                         goto next_rx;
1504                 }
1505         } else {
1506                 u32 payload;
1507
1508                 if (rx_buf->data_ptr == data_ptr)
1509                         payload = misc & RX_CMP_PAYLOAD_OFFSET;
1510                 else
1511                         payload = 0;
1512                 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
1513                                       payload | len);
1514                 if (!skb) {
1515                         rc = -ENOMEM;
1516                         goto next_rx;
1517                 }
1518         }
1519
1520         if (agg_bufs) {
1521                 skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
1522                 if (!skb) {
1523                         rc = -ENOMEM;
1524                         goto next_rx;
1525                 }
1526         }
1527
1528         if (RX_CMP_HASH_VALID(rxcmp)) {
1529                 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1530                 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1531
1532                 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1533                 if (hash_type != 1 && hash_type != 3)
1534                         type = PKT_HASH_TYPE_L3;
1535                 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1536         }
1537
1538         skb->protocol = eth_type_trans(skb, dev);
1539
1540         if ((rxcmp1->rx_cmp_flags2 &
1541              cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
1542             (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
1543                 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1544                 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK;
1545                 u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
1546
1547                 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
1548         }
1549
1550         skb_checksum_none_assert(skb);
1551         if (RX_CMP_L4_CS_OK(rxcmp1)) {
1552                 if (dev->features & NETIF_F_RXCSUM) {
1553                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1554                         skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1555                 }
1556         } else {
1557                 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1558                         if (dev->features & NETIF_F_RXCSUM)
1559                                 cpr->rx_l4_csum_errors++;
1560                 }
1561         }
1562
1563         skb_record_rx_queue(skb, bnapi->index);
1564         napi_gro_receive(&bnapi->napi, skb);
1565         rc = 1;
1566
1567 next_rx:
1568         rxr->rx_prod = NEXT_RX(prod);
1569         rxr->rx_next_cons = NEXT_RX(cons);
1570
1571 next_rx_no_prod:
1572         *raw_cons = tmp_raw_cons;
1573
1574         return rc;
1575 }
1576
1577 /* In netpoll mode, if we are using a combined completion ring, we need to
1578  * discard the rx packets and recycle the buffers.
1579  */
1580 static int bnxt_force_rx_discard(struct bnxt *bp, struct bnxt_napi *bnapi,
1581                                  u32 *raw_cons, u8 *event)
1582 {
1583         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1584         u32 tmp_raw_cons = *raw_cons;
1585         struct rx_cmp_ext *rxcmp1;
1586         struct rx_cmp *rxcmp;
1587         u16 cp_cons;
1588         u8 cmp_type;
1589
1590         cp_cons = RING_CMP(tmp_raw_cons);
1591         rxcmp = (struct rx_cmp *)
1592                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1593
1594         tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1595         cp_cons = RING_CMP(tmp_raw_cons);
1596         rxcmp1 = (struct rx_cmp_ext *)
1597                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1598
1599         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1600                 return -EBUSY;
1601
1602         cmp_type = RX_CMP_TYPE(rxcmp);
1603         if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1604                 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
1605                         cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
1606         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1607                 struct rx_tpa_end_cmp_ext *tpa_end1;
1608
1609                 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
1610                 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
1611                         cpu_to_le32(RX_TPA_END_CMP_ERRORS);
1612         }
1613         return bnxt_rx_pkt(bp, bnapi, raw_cons, event);
1614 }
1615
1616 #define BNXT_GET_EVENT_PORT(data)       \
1617         ((data) &                       \
1618          ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
1619
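/* Handle a firmware asynchronous event completion.  Most events just
 * set a bit in bp->sp_event and schedule the slow-path task; ULPs
 * (e.g. the RDMA driver) are notified of every event at the end.
 */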
1620 static int bnxt_async_event_process(struct bnxt *bp,
1621                                     struct hwrm_async_event_cmpl *cmpl)
1622 {
1623         u16 event_id = le16_to_cpu(cmpl->event_id);
1624
1625         /* TODO CHIMP_FW: Define event id's for link change, error etc */
1626         switch (event_id) {
1627         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
1628                 u32 data1 = le32_to_cpu(cmpl->event_data1);
1629                 struct bnxt_link_info *link_info = &bp->link_info;
1630
1631                 if (BNXT_VF(bp))
1632                         goto async_event_process_exit;
1633                 if (data1 & 0x20000) {
1634                         u16 fw_speed = link_info->force_link_speed;
1635                         u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
1636
1637                         netdev_warn(bp->dev, "Link speed %d no longer supported\n",
1638                                     speed);
1639                 }
1640                 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
1641                 /* fall through */
1642         }
1643         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
1644                 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
1645                 break;
1646         case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
1647                 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
1648                 break;
1649         case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
1650                 u32 data1 = le32_to_cpu(cmpl->event_data1);
1651                 u16 port_id = BNXT_GET_EVENT_PORT(data1);
1652
1653                 if (BNXT_VF(bp))
1654                         break;
1655
1656                 if (bp->pf.port_id != port_id)
1657                         break;
1658
1659                 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
1660                 break;
1661         }
1662         case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
1663                 if (BNXT_PF(bp))
1664                         goto async_event_process_exit;
1665                 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
1666                 break;
1667         default:
1668                 goto async_event_process_exit;
1669         }
1670         schedule_work(&bp->sp_task);
1671 async_event_process_exit:
1672         bnxt_ulp_async_events(bp, cmpl);
1673         return 0;
1674 }
1675
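/* Dispatch an HWRM completion found on the completion ring: a DONE
 * completion for a command issued in interrupt context, a request
 * forwarded from a VF (PF only), or an asynchronous event.
 */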
1676 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
1677 {
1678         u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
1679         struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
1680         struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
1681                                 (struct hwrm_fwd_req_cmpl *)txcmp;
1682
1683         switch (cmpl_type) {
1684         case CMPL_BASE_TYPE_HWRM_DONE:
1685                 seq_id = le16_to_cpu(h_cmpl->sequence_id);
1686                 if (seq_id == bp->hwrm_intr_seq_id)
1687                         bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID;
1688                 else
1689                         netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
1690                 break;
1691
1692         case CMPL_BASE_TYPE_HWRM_FWD_REQ:
1693                 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
1694
1695                 if ((vf_id < bp->pf.first_vf_id) ||
1696                     (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
1697                         netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
1698                                    vf_id);
1699                         return -EINVAL;
1700                 }
1701
1702                 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
1703                 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
1704                 schedule_work(&bp->sp_task);
1705                 break;
1706
1707         case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
1708                 bnxt_async_event_process(bp,
1709                                          (struct hwrm_async_event_cmpl *)txcmp);
                break;
1710
1711         default:
1712                 break;
1713         }
1714
1715         return 0;
1716 }
1717
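/* MSI-X handler, one vector per completion ring.  All real work is
 * deferred to NAPI; the current completion entry is prefetched so
 * that bnxt_poll() finds it warm in the cache.
 */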
1718 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
1719 {
1720         struct bnxt_napi *bnapi = dev_instance;
1721         struct bnxt *bp = bnapi->bp;
1722         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1723         u32 cons = RING_CMP(cpr->cp_raw_cons);
1724
1725         prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1726         napi_schedule(&bnapi->napi);
1727         return IRQ_HANDLED;
1728 }
1729
1730 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1731 {
1732         u32 raw_cons = cpr->cp_raw_cons;
1733         u16 cons = RING_CMP(raw_cons);
1734         struct tx_cmp *txcmp;
1735
1736         txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1737
1738         return TX_CMP_VALID(txcmp, raw_cons);
1739 }
1740
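/* Legacy INTx handler.  The line may be shared, so if the completion
 * ring shows no work, the CAG status register is checked to tell our
 * interrupt apart from another device's before claiming it.
 */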
1741 static irqreturn_t bnxt_inta(int irq, void *dev_instance)
1742 {
1743         struct bnxt_napi *bnapi = dev_instance;
1744         struct bnxt *bp = bnapi->bp;
1745         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1746         u32 cons = RING_CMP(cpr->cp_raw_cons);
1747         u32 int_status;
1748
1749         prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1750
1751         if (!bnxt_has_work(bp, cpr)) {
1752                 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
1753                 /* return if erroneous interrupt */
1754                 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
1755                         return IRQ_NONE;
1756         }
1757
1758         /* disable ring IRQ */
1759         BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell);
1760
1761         /* Return here if interrupt is shared and is disabled. */
1762         if (unlikely(atomic_read(&bp->intr_sem) != 0))
1763                 return IRQ_HANDLED;
1764
1765         napi_schedule(&bnapi->napi);
1766         return IRQ_HANDLED;
1767 }
1768
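/* Service one completion ring: reap TX completions, receive packets,
 * and dispatch HWRM/async completions until the NAPI budget is
 * exhausted or the ring is empty.  Returns the number of RX packets
 * processed.
 */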
1769 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
1770 {
1771         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1772         u32 raw_cons = cpr->cp_raw_cons;
1773         u32 cons;
1774         int tx_pkts = 0;
1775         int rx_pkts = 0;
1776         u8 event = 0;
1777         struct tx_cmp *txcmp;
1778
1779         while (1) {
1780                 int rc;
1781
1782                 cons = RING_CMP(raw_cons);
1783                 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1784
1785                 if (!TX_CMP_VALID(txcmp, raw_cons))
1786                         break;
1787
1788                 /* The valid test of the entry must be done before
1789                  * reading any further.
1790                  */
1791                 dma_rmb();
1792                 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
1793                         tx_pkts++;
1794                         /* return full budget so NAPI will complete. */
1795                         if (unlikely(tx_pkts > bp->tx_wake_thresh))
1796                                 rx_pkts = budget;
1797                 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
1798                         if (likely(budget))
1799                                 rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
1800                         else
1801                                 rc = bnxt_force_rx_discard(bp, bnapi, &raw_cons,
1802                                                            &event);
1803                         if (likely(rc >= 0))
1804                                 rx_pkts += rc;
1805                         else if (rc == -EBUSY)  /* partial completion */
1806                                 break;
1807                 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
1808                                      CMPL_BASE_TYPE_HWRM_DONE) ||
1809                                     (TX_CMP_TYPE(txcmp) ==
1810                                      CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
1811                                     (TX_CMP_TYPE(txcmp) ==
1812                                      CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
1813                         bnxt_hwrm_handler(bp, txcmp);
1814                 }
1815                 raw_cons = NEXT_RAW_CMP(raw_cons);
1816
1817                 if (rx_pkts == budget)
1818                         break;
1819         }
1820
1821         if (event & BNXT_TX_EVENT) {
1822                 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
1823                 void __iomem *db = txr->tx_doorbell;
1824                 u16 prod = txr->tx_prod;
1825
1826                 /* Sync BD data before updating doorbell */
1827                 wmb();
1828
1829                 bnxt_db_write(bp, db, DB_KEY_TX | prod);
1830         }
1831
1832         cpr->cp_raw_cons = raw_cons;
1833         /* ACK completion ring before freeing tx ring and producing new
1834          * buffers in rx/agg rings to prevent overflowing the completion
1835          * ring.
1836          */
1837         BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
1838
1839         if (tx_pkts)
1840                 bnapi->tx_int(bp, bnapi, tx_pkts);
1841
1842         if (event & BNXT_RX_EVENT) {
1843                 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1844
1845                 bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rxr->rx_prod);
1846                 if (event & BNXT_AGG_EVENT)
1847                         bnxt_db_write(bp, rxr->rx_agg_doorbell,
1848                                       DB_KEY_RX | rxr->rx_agg_prod);
1849         }
1850         return rx_pkts;
1851 }
1852
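/* NAPI poll for the special Nitro A0 completion ring.  Packets seen
 * here are not passed up the stack; each completion is stamped with a
 * forced CRC error so that bnxt_rx_pkt() recycles the buffer.
 */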
1853 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
1854 {
1855         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
1856         struct bnxt *bp = bnapi->bp;
1857         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1858         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1859         struct tx_cmp *txcmp;
1860         struct rx_cmp_ext *rxcmp1;
1861         u32 cp_cons, tmp_raw_cons;
1862         u32 raw_cons = cpr->cp_raw_cons;
1863         u32 rx_pkts = 0;
1864         u8 event = 0;
1865
1866         while (1) {
1867                 int rc;
1868
1869                 cp_cons = RING_CMP(raw_cons);
1870                 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1871
1872                 if (!TX_CMP_VALID(txcmp, raw_cons))
1873                         break;
1874
1875                 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
1876                         tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
1877                         cp_cons = RING_CMP(tmp_raw_cons);
1878                         rxcmp1 = (struct rx_cmp_ext *)
1879                           &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1880
1881                         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1882                                 break;
1883
1884                         /* force an error to recycle the buffer */
1885                         rxcmp1->rx_cmp_cfa_code_errors_v2 |=
1886                                 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
1887
1888                         rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
1889                         if (likely(rc == -EIO))
1890                                 rx_pkts++;
1891                         else if (rc == -EBUSY)  /* partial completion */
1892                                 break;
1893                 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
1894                                     CMPL_BASE_TYPE_HWRM_DONE)) {
1895                         bnxt_hwrm_handler(bp, txcmp);
1896                 } else {
1897                         netdev_err(bp->dev,
1898                                    "Invalid completion received on special ring\n");
1899                 }
1900                 raw_cons = NEXT_RAW_CMP(raw_cons);
1901
1902                 if (rx_pkts == budget)
1903                         break;
1904         }
1905
1906         cpr->cp_raw_cons = raw_cons;
1907         BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
1908         bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rxr->rx_prod);
1909
1910         if (event & BNXT_AGG_EVENT)
1911                 bnxt_db_write(bp, rxr->rx_agg_doorbell,
1912                               DB_KEY_RX | rxr->rx_agg_prod);
1913
1914         if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
1915                 napi_complete_done(napi, rx_pkts);
1916                 BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
1917         }
1918         return rx_pkts;
1919 }
1920
1921 static int bnxt_poll(struct napi_struct *napi, int budget)
1922 {
1923         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
1924         struct bnxt *bp = bnapi->bp;
1925         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1926         int work_done = 0;
1927
1928         while (1) {
1929                 work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
1930
1931                 if (work_done >= budget)
1932                         break;
1933
1934                 if (!bnxt_has_work(bp, cpr)) {
1935                         if (napi_complete_done(napi, work_done))
1936                                 BNXT_CP_DB_REARM(cpr->cp_doorbell,
1937                                                  cpr->cp_raw_cons);
1938                         break;
1939                 }
1940         }
1941         mmiowb();
1942         return work_done;
1943 }
1944
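/* Free all TX buffers still owned by the driver.  Each packet uses two
 * BDs (the first BD plus the extended BD) and one BD per page fragment,
 * which is why the index advances by 2 before the frags are unmapped.
 */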
1945 static void bnxt_free_tx_skbs(struct bnxt *bp)
1946 {
1947         int i, max_idx;
1948         struct pci_dev *pdev = bp->pdev;
1949
1950         if (!bp->tx_ring)
1951                 return;
1952
1953         max_idx = bp->tx_nr_pages * TX_DESC_CNT;
1954         for (i = 0; i < bp->tx_nr_rings; i++) {
1955                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
1956                 int j;
1957
1958                 for (j = 0; j < max_idx;) {
1959                         struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
1960                         struct sk_buff *skb = tx_buf->skb;
1961                         int k, last;
1962
1963                         if (!skb) {
1964                                 j++;
1965                                 continue;
1966                         }
1967
1968                         tx_buf->skb = NULL;
1969
1970                         if (tx_buf->is_push) {
1971                                 dev_kfree_skb(skb);
1972                                 j += 2;
1973                                 continue;
1974                         }
1975
1976                         dma_unmap_single(&pdev->dev,
1977                                          dma_unmap_addr(tx_buf, mapping),
1978                                          skb_headlen(skb),
1979                                          PCI_DMA_TODEVICE);
1980
1981                         last = tx_buf->nr_frags;
1982                         j += 2;
1983                         for (k = 0; k < last; k++, j++) {
1984                                 int ring_idx = j & bp->tx_ring_mask;
1985                                 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
1986
1987                                 tx_buf = &txr->tx_buf_ring[ring_idx];
1988                                 dma_unmap_page(
1989                                         &pdev->dev,
1990                                         dma_unmap_addr(tx_buf, mapping),
1991                                         skb_frag_size(frag), PCI_DMA_TODEVICE);
1992                         }
1993                         dev_kfree_skb(skb);
1994                 }
1995                 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
1996         }
1997 }
1998
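/* Free all RX buffers: the TPA staging buffers, the regular ring
 * buffers (full pages in XDP page mode, kmalloc'ed data otherwise),
 * and the aggregation ring pages.
 */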
1999 static void bnxt_free_rx_skbs(struct bnxt *bp)
2000 {
2001         int i, max_idx, max_agg_idx;
2002         struct pci_dev *pdev = bp->pdev;
2003
2004         if (!bp->rx_ring)
2005                 return;
2006
2007         max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2008         max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2009         for (i = 0; i < bp->rx_nr_rings; i++) {
2010                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2011                 int j;
2012
2013                 if (rxr->rx_tpa) {
2014                         for (j = 0; j < MAX_TPA; j++) {
2015                                 struct bnxt_tpa_info *tpa_info =
2016                                                         &rxr->rx_tpa[j];
2017                                 u8 *data = tpa_info->data;
2018
2019                                 if (!data)
2020                                         continue;
2021
2022                                 dma_unmap_single_attrs(&pdev->dev,
2023                                                        tpa_info->mapping,
2024                                                        bp->rx_buf_use_size,
2025                                                        bp->rx_dir,
2026                                                        DMA_ATTR_WEAK_ORDERING);
2027
2028                                 tpa_info->data = NULL;
2029
2030                                 kfree(data);
2031                         }
2032                 }
2033
2034                 for (j = 0; j < max_idx; j++) {
2035                         struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
2036                         dma_addr_t mapping = rx_buf->mapping;
2037                         void *data = rx_buf->data;
2038
2039                         if (!data)
2040                                 continue;
2041
2042                         rx_buf->data = NULL;
2043
2044                         if (BNXT_RX_PAGE_MODE(bp)) {
2045                                 mapping -= bp->rx_dma_offset;
2046                                 dma_unmap_page_attrs(&pdev->dev, mapping,
2047                                                      PAGE_SIZE, bp->rx_dir,
2048                                                      DMA_ATTR_WEAK_ORDERING);
2049                                 __free_page(data);
2050                         } else {
2051                                 dma_unmap_single_attrs(&pdev->dev, mapping,
2052                                                        bp->rx_buf_use_size,
2053                                                        bp->rx_dir,
2054                                                        DMA_ATTR_WEAK_ORDERING);
2055                                 kfree(data);
2056                         }
2057                 }
2058
2059                 for (j = 0; j < max_agg_idx; j++) {
2060                         struct bnxt_sw_rx_agg_bd *rx_agg_buf =
2061                                 &rxr->rx_agg_ring[j];
2062                         struct page *page = rx_agg_buf->page;
2063
2064                         if (!page)
2065                                 continue;
2066
2067                         dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
2068                                              BNXT_RX_PAGE_SIZE,
2069                                              PCI_DMA_FROMDEVICE,
2070                                              DMA_ATTR_WEAK_ORDERING);
2071
2072                         rx_agg_buf->page = NULL;
2073                         __clear_bit(j, rxr->rx_agg_bmap);
2074
2075                         __free_page(page);
2076                 }
2077                 if (rxr->rx_page) {
2078                         __free_page(rxr->rx_page);
2079                         rxr->rx_page = NULL;
2080                 }
2081         }
2082 }
2083
2084 static void bnxt_free_skbs(struct bnxt *bp)
2085 {
2086         bnxt_free_tx_skbs(bp);
2087         bnxt_free_rx_skbs(bp);
2088 }
2089
2090 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
2091 {
2092         struct pci_dev *pdev = bp->pdev;
2093         int i;
2094
2095         for (i = 0; i < ring->nr_pages; i++) {
2096                 if (!ring->pg_arr[i])
2097                         continue;
2098
2099                 dma_free_coherent(&pdev->dev, ring->page_size,
2100                                   ring->pg_arr[i], ring->dma_arr[i]);
2101
2102                 ring->pg_arr[i] = NULL;
2103         }
2104         if (ring->pg_tbl) {
2105                 dma_free_coherent(&pdev->dev, ring->nr_pages * 8,
2106                                   ring->pg_tbl, ring->pg_tbl_map);
2107                 ring->pg_tbl = NULL;
2108         }
2109         if (ring->vmem_size && *ring->vmem) {
2110                 vfree(*ring->vmem);
2111                 *ring->vmem = NULL;
2112         }
2113 }
2114
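/* Allocate one HW ring: an array of DMA-coherent pages, an indirection
 * table (8 bytes per entry) when the ring spans more than one page,
 * and an optional vzalloc'ed software ring for driver bookkeeping.
 */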
2115 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
2116 {
2117         int i;
2118         struct pci_dev *pdev = bp->pdev;
2119
2120         if (ring->nr_pages > 1) {
2121                 ring->pg_tbl = dma_alloc_coherent(&pdev->dev,
2122                                                   ring->nr_pages * 8,
2123                                                   &ring->pg_tbl_map,
2124                                                   GFP_KERNEL);
2125                 if (!ring->pg_tbl)
2126                         return -ENOMEM;
2127         }
2128
2129         for (i = 0; i < ring->nr_pages; i++) {
2130                 ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2131                                                      ring->page_size,
2132                                                      &ring->dma_arr[i],
2133                                                      GFP_KERNEL);
2134                 if (!ring->pg_arr[i])
2135                         return -ENOMEM;
2136
2137                 if (ring->nr_pages > 1)
2138                         ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]);
2139         }
2140
2141         if (ring->vmem_size) {
2142                 *ring->vmem = vzalloc(ring->vmem_size);
2143                 if (!(*ring->vmem))
2144                         return -ENOMEM;
2145         }
2146         return 0;
2147 }
2148
2149 static void bnxt_free_rx_rings(struct bnxt *bp)
2150 {
2151         int i;
2152
2153         if (!bp->rx_ring)
2154                 return;
2155
2156         for (i = 0; i < bp->rx_nr_rings; i++) {
2157                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2158                 struct bnxt_ring_struct *ring;
2159
2160                 if (rxr->xdp_prog)
2161                         bpf_prog_put(rxr->xdp_prog);
2162
2163                 kfree(rxr->rx_tpa);
2164                 rxr->rx_tpa = NULL;
2165
2166                 kfree(rxr->rx_agg_bmap);
2167                 rxr->rx_agg_bmap = NULL;
2168
2169                 ring = &rxr->rx_ring_struct;
2170                 bnxt_free_ring(bp, ring);
2171
2172                 ring = &rxr->rx_agg_ring_struct;
2173                 bnxt_free_ring(bp, ring);
2174         }
2175 }
2176
2177 static int bnxt_alloc_rx_rings(struct bnxt *bp)
2178 {
2179         int i, rc, agg_rings = 0, tpa_rings = 0;
2180
2181         if (!bp->rx_ring)
2182                 return -ENOMEM;
2183
2184         if (bp->flags & BNXT_FLAG_AGG_RINGS)
2185                 agg_rings = 1;
2186
2187         if (bp->flags & BNXT_FLAG_TPA)
2188                 tpa_rings = 1;
2189
2190         for (i = 0; i < bp->rx_nr_rings; i++) {
2191                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2192                 struct bnxt_ring_struct *ring;
2193
2194                 ring = &rxr->rx_ring_struct;
2195
2196                 rc = bnxt_alloc_ring(bp, ring);
2197                 if (rc)
2198                         return rc;
2199
2200                 if (agg_rings) {
2201                         u16 mem_size;
2202
2203                         ring = &rxr->rx_agg_ring_struct;
2204                         rc = bnxt_alloc_ring(bp, ring);
2205                         if (rc)
2206                                 return rc;
2207
2208                         rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
2209                         mem_size = rxr->rx_agg_bmap_size / 8;
2210                         rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
2211                         if (!rxr->rx_agg_bmap)
2212                                 return -ENOMEM;
2213
2214                         if (tpa_rings) {
2215                                 rxr->rx_tpa = kcalloc(MAX_TPA,
2216                                                 sizeof(struct bnxt_tpa_info),
2217                                                 GFP_KERNEL);
2218                                 if (!rxr->rx_tpa)
2219                                         return -ENOMEM;
2220                         }
2221                 }
2222         }
2223         return 0;
2224 }
2225
2226 static void bnxt_free_tx_rings(struct bnxt *bp)
2227 {
2228         int i;
2229         struct pci_dev *pdev = bp->pdev;
2230
2231         if (!bp->tx_ring)
2232                 return;
2233
2234         for (i = 0; i < bp->tx_nr_rings; i++) {
2235                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2236                 struct bnxt_ring_struct *ring;
2237
2238                 if (txr->tx_push) {
2239                         dma_free_coherent(&pdev->dev, bp->tx_push_size,
2240                                           txr->tx_push, txr->tx_push_mapping);
2241                         txr->tx_push = NULL;
2242                 }
2243
2244                 ring = &txr->tx_ring_struct;
2245
2246                 bnxt_free_ring(bp, ring);
2247         }
2248 }
2249
2250 static int bnxt_alloc_tx_rings(struct bnxt *bp)
2251 {
2252         int i, j, rc;
2253         struct pci_dev *pdev = bp->pdev;
2254
2255         bp->tx_push_size = 0;
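        /* TX push writes the BDs plus the packet data directly into BAR
         * space, which only pays off for small packets, so the total
         * push buffer is capped at 256 bytes.
         */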
2256         if (bp->tx_push_thresh) {
2257                 int push_size;
2258
2259                 push_size  = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
2260                                         bp->tx_push_thresh);
2261
2262                 if (push_size > 256) {
2263                         push_size = 0;
2264                         bp->tx_push_thresh = 0;
2265                 }
2266
2267                 bp->tx_push_size = push_size;
2268         }
2269
2270         for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
2271                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2272                 struct bnxt_ring_struct *ring;
2273
2274                 ring = &txr->tx_ring_struct;
2275
2276                 rc = bnxt_alloc_ring(bp, ring);
2277                 if (rc)
2278                         return rc;
2279
2280                 if (bp->tx_push_size) {
2281                         dma_addr_t mapping;
2282
2283                         /* One pre-allocated DMA buffer to back up
2284                          * the TX push operation
2285                          */
2286                         txr->tx_push = dma_alloc_coherent(&pdev->dev,
2287                                                 bp->tx_push_size,
2288                                                 &txr->tx_push_mapping,
2289                                                 GFP_KERNEL);
2290
2291                         if (!txr->tx_push)
2292                                 return -ENOMEM;
2293
2294                         mapping = txr->tx_push_mapping +
2295                                 sizeof(struct tx_push_bd);
2296                         txr->data_mapping = cpu_to_le64(mapping);
2297
2298                         memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
2299                 }
2300                 ring->queue_id = bp->q_info[j].queue_id;
2301                 if (i < bp->tx_nr_rings_xdp)
2302                         continue;
2303                 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
2304                         j++;
2305         }
2306         return 0;
2307 }
2308
2309 static void bnxt_free_cp_rings(struct bnxt *bp)
2310 {
2311         int i;
2312
2313         if (!bp->bnapi)
2314                 return;
2315
2316         for (i = 0; i < bp->cp_nr_rings; i++) {
2317                 struct bnxt_napi *bnapi = bp->bnapi[i];
2318                 struct bnxt_cp_ring_info *cpr;
2319                 struct bnxt_ring_struct *ring;
2320
2321                 if (!bnapi)
2322                         continue;
2323
2324                 cpr = &bnapi->cp_ring;
2325                 ring = &cpr->cp_ring_struct;
2326
2327                 bnxt_free_ring(bp, ring);
2328         }
2329 }
2330
2331 static int bnxt_alloc_cp_rings(struct bnxt *bp)
2332 {
2333         int i, rc;
2334
2335         for (i = 0; i < bp->cp_nr_rings; i++) {
2336                 struct bnxt_napi *bnapi = bp->bnapi[i];
2337                 struct bnxt_cp_ring_info *cpr;
2338                 struct bnxt_ring_struct *ring;
2339
2340                 if (!bnapi)
2341                         continue;
2342
2343                 cpr = &bnapi->cp_ring;
2344                 ring = &cpr->cp_ring_struct;
2345
2346                 rc = bnxt_alloc_ring(bp, ring);
2347                 if (rc)
2348                         return rc;
2349         }
2350         return 0;
2351 }
2352
2353 static void bnxt_init_ring_struct(struct bnxt *bp)
2354 {
2355         int i;
2356
2357         for (i = 0; i < bp->cp_nr_rings; i++) {
2358                 struct bnxt_napi *bnapi = bp->bnapi[i];
2359                 struct bnxt_cp_ring_info *cpr;
2360                 struct bnxt_rx_ring_info *rxr;
2361                 struct bnxt_tx_ring_info *txr;
2362                 struct bnxt_ring_struct *ring;
2363
2364                 if (!bnapi)
2365                         continue;
2366
2367                 cpr = &bnapi->cp_ring;
2368                 ring = &cpr->cp_ring_struct;
2369                 ring->nr_pages = bp->cp_nr_pages;
2370                 ring->page_size = HW_CMPD_RING_SIZE;
2371                 ring->pg_arr = (void **)cpr->cp_desc_ring;
2372                 ring->dma_arr = cpr->cp_desc_mapping;
2373                 ring->vmem_size = 0;
2374
2375                 rxr = bnapi->rx_ring;
2376                 if (!rxr)
2377                         goto skip_rx;
2378
2379                 ring = &rxr->rx_ring_struct;
2380                 ring->nr_pages = bp->rx_nr_pages;
2381                 ring->page_size = HW_RXBD_RING_SIZE;
2382                 ring->pg_arr = (void **)rxr->rx_desc_ring;
2383                 ring->dma_arr = rxr->rx_desc_mapping;
2384                 ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
2385                 ring->vmem = (void **)&rxr->rx_buf_ring;
2386
2387                 ring = &rxr->rx_agg_ring_struct;
2388                 ring->nr_pages = bp->rx_agg_nr_pages;
2389                 ring->page_size = HW_RXBD_RING_SIZE;
2390                 ring->pg_arr = (void **)rxr->rx_agg_desc_ring;
2391                 ring->dma_arr = rxr->rx_agg_desc_mapping;
2392                 ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
2393                 ring->vmem = (void **)&rxr->rx_agg_ring;
2394
2395 skip_rx:
2396                 txr = bnapi->tx_ring;
2397                 if (!txr)
2398                         continue;
2399
2400                 ring = &txr->tx_ring_struct;
2401                 ring->nr_pages = bp->tx_nr_pages;
2402                 ring->page_size = HW_RXBD_RING_SIZE;
2403                 ring->pg_arr = (void **)txr->tx_desc_ring;
2404                 ring->dma_arr = txr->tx_desc_mapping;
2405                 ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
2406                 ring->vmem = (void **)&txr->tx_buf_ring;
2407         }
2408 }
2409
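/* Pre-program every RX BD with its length/flags/type word and an
 * opaque field holding the producer index, so that only the buffer
 * address needs to be written when the ring is refilled.
 */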
2410 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
2411 {
2412         int i;
2413         u32 prod;
2414         struct rx_bd **rx_buf_ring;
2415
2416         rx_buf_ring = (struct rx_bd **)ring->pg_arr;
2417         for (i = 0, prod = 0; i < ring->nr_pages; i++) {
2418                 int j;
2419                 struct rx_bd *rxbd;
2420
2421                 rxbd = rx_buf_ring[i];
2422                 if (!rxbd)
2423                         continue;
2424
2425                 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
2426                         rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
2427                         rxbd->rx_bd_opaque = prod;
2428                 }
2429         }
2430 }
2431
2432 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
2433 {
2434         struct net_device *dev = bp->dev;
2435         struct bnxt_rx_ring_info *rxr;
2436         struct bnxt_ring_struct *ring;
2437         u32 prod, type;
2438         int i;
2439
2440         type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
2441                 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
2442
2443         if (NET_IP_ALIGN == 2)
2444                 type |= RX_BD_FLAGS_SOP;
2445
2446         rxr = &bp->rx_ring[ring_nr];
2447         ring = &rxr->rx_ring_struct;
2448         bnxt_init_rxbd_pages(ring, type);
2449
2450         if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
2451                 rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1);
2452                 if (IS_ERR(rxr->xdp_prog)) {
2453                         int rc = PTR_ERR(rxr->xdp_prog);
2454
2455                         rxr->xdp_prog = NULL;
2456                         return rc;
2457                 }
2458         }
2459         prod = rxr->rx_prod;
2460         for (i = 0; i < bp->rx_ring_size; i++) {
2461                 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
2462                         netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
2463                                     ring_nr, i, bp->rx_ring_size);
2464                         break;
2465                 }
2466                 prod = NEXT_RX(prod);
2467         }
2468         rxr->rx_prod = prod;
2469         ring->fw_ring_id = INVALID_HW_RING_ID;
2470
2471         ring = &rxr->rx_agg_ring_struct;
2472         ring->fw_ring_id = INVALID_HW_RING_ID;
2473
2474         if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
2475                 return 0;
2476
2477         type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
2478                 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
2479
2480         bnxt_init_rxbd_pages(ring, type);
2481
2482         prod = rxr->rx_agg_prod;
2483         for (i = 0; i < bp->rx_agg_ring_size; i++) {
2484                 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
2485                         netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
2486                                     ring_nr, i, bp->rx_agg_ring_size);
2487                         break;
2488                 }
2489                 prod = NEXT_RX_AGG(prod);
2490         }
2491         rxr->rx_agg_prod = prod;
2492
2493         if (bp->flags & BNXT_FLAG_TPA) {
2494                 if (rxr->rx_tpa) {
2495                         u8 *data;
2496                         dma_addr_t mapping;
2497
2498                         for (i = 0; i < MAX_TPA; i++) {
2499                                 data = __bnxt_alloc_rx_data(bp, &mapping,
2500                                                             GFP_KERNEL);
2501                                 if (!data)
2502                                         return -ENOMEM;
2503
2504                                 rxr->rx_tpa[i].data = data;
2505                                 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
2506                                 rxr->rx_tpa[i].mapping = mapping;
2507                         }
2508                 } else {
2509                         netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
2510                         return -ENOMEM;
2511                 }
2512         }
2513
2514         return 0;
2515 }
2516
2517 static void bnxt_init_cp_rings(struct bnxt *bp)
2518 {
2519         int i;
2520
2521         for (i = 0; i < bp->cp_nr_rings; i++) {
2522                 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
2523                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
2524
2525                 ring->fw_ring_id = INVALID_HW_RING_ID;
2526         }
2527 }
2528
2529 static int bnxt_init_rx_rings(struct bnxt *bp)
2530 {
2531         int i, rc = 0;
2532
2533         if (BNXT_RX_PAGE_MODE(bp)) {
2534                 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
2535                 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
2536         } else {
2537                 bp->rx_offset = BNXT_RX_OFFSET;
2538                 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
2539         }
2540
2541         for (i = 0; i < bp->rx_nr_rings; i++) {
2542                 rc = bnxt_init_one_rx_ring(bp, i);
2543                 if (rc)
2544                         break;
2545         }
2546
2547         return rc;
2548 }
2549
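/* The TX queue is woken once at least half the ring is free, and never
 * with fewer free BDs than a maximally fragmented SKB could need.
 */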
2550 static int bnxt_init_tx_rings(struct bnxt *bp)
2551 {
2552         u16 i;
2553
2554         bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
2555                                    MAX_SKB_FRAGS + 1);
2556
2557         for (i = 0; i < bp->tx_nr_rings; i++) {
2558                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2559                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
2560
2561                 ring->fw_ring_id = INVALID_HW_RING_ID;
2562         }
2563
2564         return 0;
2565 }
2566
2567 static void bnxt_free_ring_grps(struct bnxt *bp)
2568 {
2569         kfree(bp->grp_info);
2570         bp->grp_info = NULL;
2571 }
2572
2573 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
2574 {
2575         int i;
2576
2577         if (irq_re_init) {
2578                 bp->grp_info = kcalloc(bp->cp_nr_rings,
2579                                        sizeof(struct bnxt_ring_grp_info),
2580                                        GFP_KERNEL);
2581                 if (!bp->grp_info)
2582                         return -ENOMEM;
2583         }
2584         for (i = 0; i < bp->cp_nr_rings; i++) {
2585                 if (irq_re_init)
2586                         bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
2587                 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
2588                 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
2589                 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
2590                 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
2591         }
2592         return 0;
2593 }
2594
2595 static void bnxt_free_vnics(struct bnxt *bp)
2596 {
2597         kfree(bp->vnic_info);
2598         bp->vnic_info = NULL;
2599         bp->nr_vnics = 0;
2600 }
2601
2602 static int bnxt_alloc_vnics(struct bnxt *bp)
2603 {
2604         int num_vnics = 1;
2605
2606 #ifdef CONFIG_RFS_ACCEL
2607         if (bp->flags & BNXT_FLAG_RFS)
2608                 num_vnics += bp->rx_nr_rings;
2609 #endif
2610
2611         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
2612                 num_vnics++;
2613
2614         bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
2615                                 GFP_KERNEL);
2616         if (!bp->vnic_info)
2617                 return -ENOMEM;
2618
2619         bp->nr_vnics = num_vnics;
2620         return 0;
2621 }
2622
2623 static void bnxt_init_vnics(struct bnxt *bp)
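/* VNIC 0 gets a random RSS hash key; the other VNICs copy it so that
 * a given flow hashes identically on every VNIC.
 */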
2624 {
2625         int i;
2626
2627         for (i = 0; i < bp->nr_vnics; i++) {
2628                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2629
2630                 vnic->fw_vnic_id = INVALID_HW_RING_ID;
2631                 vnic->fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
2632                 vnic->fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
2633                 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
2634
2635                 if (bp->vnic_info[i].rss_hash_key) {
2636                         if (i == 0)
2637                                 prandom_bytes(vnic->rss_hash_key,
2638                                               HW_HASH_KEY_SIZE);
2639                         else
2640                                 memcpy(vnic->rss_hash_key,
2641                                        bp->vnic_info[0].rss_hash_key,
2642                                        HW_HASH_KEY_SIZE);
2643                 }
2644         }
2645 }
2646
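/* Round the number of ring pages up to a power of two.  The increment
 * before the loop keeps the descriptor count strictly greater than
 * ring_size, which the mask-based ring index arithmetic relies on.
 */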
2647 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
2648 {
2649         int pages;
2650
2651         pages = ring_size / desc_per_pg;
2652
2653         if (!pages)
2654                 return 1;
2655
2656         pages++;
2657
2658         while (pages & (pages - 1))
2659                 pages++;
2660
2661         return pages;
2662 }
2663
2664 void bnxt_set_tpa_flags(struct bnxt *bp)
2665 {
2666         bp->flags &= ~BNXT_FLAG_TPA;
2667         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
2668                 return;
2669         if (bp->dev->features & NETIF_F_LRO)
2670                 bp->flags |= BNXT_FLAG_LRO;
2671         if (bp->dev->features & NETIF_F_GRO)
2672                 bp->flags |= BNXT_FLAG_GRO;
2673 }
2674
2675 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
2676  * be set on entry.
2677  */
2678 void bnxt_set_ring_params(struct bnxt *bp)
2679 {
2680         u32 ring_size, rx_size, rx_space;
2681         u32 agg_factor = 0, agg_ring_size = 0;
2682
2683         /* 8 for CRC and VLAN */
2684         rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
2685
2686         rx_space = rx_size + NET_SKB_PAD +
2687                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2688
2689         bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
2690         ring_size = bp->rx_ring_size;
2691         bp->rx_agg_ring_size = 0;
2692         bp->rx_agg_nr_pages = 0;
2693
2694         if (bp->flags & BNXT_FLAG_TPA)
2695                 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
2696
2697         bp->flags &= ~BNXT_FLAG_JUMBO;
2698         if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
2699                 u32 jumbo_factor;
2700
2701                 bp->flags |= BNXT_FLAG_JUMBO;
2702                 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
2703                 if (jumbo_factor > agg_factor)
2704                         agg_factor = jumbo_factor;
2705         }
2706         agg_ring_size = ring_size * agg_factor;
2707
2708         if (agg_ring_size) {
2709                 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
2710                                                         RX_DESC_CNT);
2711                 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
2712                         u32 tmp = agg_ring_size;
2713
2714                         bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
2715                         agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
2716                         netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
2717                                     tmp, agg_ring_size);
2718                 }
2719                 bp->rx_agg_ring_size = agg_ring_size;
2720                 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
2721                 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
2722                 rx_space = rx_size + NET_SKB_PAD +
2723                         SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2724         }
2725
2726         bp->rx_buf_use_size = rx_size;
2727         bp->rx_buf_size = rx_space;
2728
2729         bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
2730         bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
2731
2732         ring_size = bp->tx_ring_size;
2733         bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
2734         bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
2735
2736         ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
2737         bp->cp_ring_size = ring_size;
2738
2739         bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
2740         if (bp->cp_nr_pages > MAX_CP_PAGES) {
2741                 bp->cp_nr_pages = MAX_CP_PAGES;
2742                 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
2743                 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
2744                             ring_size, bp->cp_ring_size);
2745         }
2746         bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
2747         bp->cp_ring_mask = bp->cp_bit - 1;
2748 }
2749
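/* Switch between the normal SKB receive path and the page-per-packet
 * mode used by XDP.  Page mode maps buffers bidirectionally (the XDP
 * program may rewrite packets), caps the MTU, and disallows LRO.
 */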
2750 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
2751 {
2752         if (page_mode) {
2753                 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
2754                         return -EOPNOTSUPP;
2755                 bp->dev->max_mtu = BNXT_MAX_PAGE_MODE_MTU;
2756                 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
2757                 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
2758                 bp->dev->hw_features &= ~NETIF_F_LRO;
2759                 bp->dev->features &= ~NETIF_F_LRO;
2760                 bp->rx_dir = DMA_BIDIRECTIONAL;
2761                 bp->rx_skb_func = bnxt_rx_page_skb;
2762         } else {
2763                 bp->dev->max_mtu = BNXT_MAX_MTU;
2764                 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
2765                 bp->rx_dir = DMA_FROM_DEVICE;
2766                 bp->rx_skb_func = bnxt_rx_skb;
2767         }
2768         return 0;
2769 }
2770
2771 static void bnxt_free_vnic_attributes(struct bnxt *bp)
2772 {
2773         int i;
2774         struct bnxt_vnic_info *vnic;
2775         struct pci_dev *pdev = bp->pdev;
2776
2777         if (!bp->vnic_info)
2778                 return;
2779
2780         for (i = 0; i < bp->nr_vnics; i++) {
2781                 vnic = &bp->vnic_info[i];
2782
2783                 kfree(vnic->fw_grp_ids);
2784                 vnic->fw_grp_ids = NULL;
2785
2786                 kfree(vnic->uc_list);
2787                 vnic->uc_list = NULL;
2788
2789                 if (vnic->mc_list) {
2790                         dma_free_coherent(&pdev->dev, vnic->mc_list_size,
2791                                           vnic->mc_list, vnic->mc_list_mapping);
2792                         vnic->mc_list = NULL;
2793                 }
2794
2795                 if (vnic->rss_table) {
2796                         dma_free_coherent(&pdev->dev, PAGE_SIZE,
2797                                           vnic->rss_table,
2798                                           vnic->rss_table_dma_addr);
2799                         vnic->rss_table = NULL;
2800                 }
2801
2802                 vnic->rss_hash_key = NULL;
2803                 vnic->flags = 0;
2804         }
2805 }
2806
2807 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
2808 {
2809         int i, rc = 0, size;
2810         struct bnxt_vnic_info *vnic;
2811         struct pci_dev *pdev = bp->pdev;
2812         int max_rings;
2813
2814         for (i = 0; i < bp->nr_vnics; i++) {
2815                 vnic = &bp->vnic_info[i];
2816
2817                 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
2818                         int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
2819
2820                         if (mem_size > 0) {
2821                                 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
2822                                 if (!vnic->uc_list) {
2823                                         rc = -ENOMEM;
2824                                         goto out;
2825                                 }
2826                         }
2827                 }
2828
2829                 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
2830                         vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
2831                         vnic->mc_list =
2832                                 dma_alloc_coherent(&pdev->dev,
2833                                                    vnic->mc_list_size,
2834                                                    &vnic->mc_list_mapping,
2835                                                    GFP_KERNEL);
2836                         if (!vnic->mc_list) {
2837                                 rc = -ENOMEM;
2838                                 goto out;
2839                         }
2840                 }
2841
2842                 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
2843                         max_rings = bp->rx_nr_rings;
2844                 else
2845                         max_rings = 1;
2846
2847                 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
2848                 if (!vnic->fw_grp_ids) {
2849                         rc = -ENOMEM;
2850                         goto out;
2851                 }
2852
2853                 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
2854                     !(vnic->flags & BNXT_VNIC_RSS_FLAG))
2855                         continue;
2856
2857                 /* Allocate rss table and hash key */
2858                 vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
2859                                                      &vnic->rss_table_dma_addr,
2860                                                      GFP_KERNEL);
2861                 if (!vnic->rss_table) {
2862                         rc = -ENOMEM;
2863                         goto out;
2864                 }
2865
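                /* The RSS indirection table and hash key share a single
                 * page: the key sits right after the cache-aligned table.
                 */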
2866                 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
2867
2868                 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
2869                 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
2870         }
2871         return 0;
2872
2873 out:
2874         return rc;
2875 }
2876
2877 static void bnxt_free_hwrm_resources(struct bnxt *bp)
2878 {
2879         struct pci_dev *pdev = bp->pdev;
2880
2881         dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
2882                           bp->hwrm_cmd_resp_dma_addr);
2883
2884         bp->hwrm_cmd_resp_addr = NULL;
2885         if (bp->hwrm_dbg_resp_addr) {
2886                 dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE,
2887                                   bp->hwrm_dbg_resp_addr,
2888                                   bp->hwrm_dbg_resp_dma_addr);
2889
2890                 bp->hwrm_dbg_resp_addr = NULL;
2891         }
2892 }
2893
2894 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
2895 {
2896         struct pci_dev *pdev = bp->pdev;
2897
2898         bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
2899                                                    &bp->hwrm_cmd_resp_dma_addr,
2900                                                    GFP_KERNEL);
2901         if (!bp->hwrm_cmd_resp_addr)
2902                 return -ENOMEM;
2903         bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev,
2904                                                     HWRM_DBG_REG_BUF_SIZE,
2905                                                     &bp->hwrm_dbg_resp_dma_addr,
2906                                                     GFP_KERNEL);
2907         if (!bp->hwrm_dbg_resp_addr)
2908                 netdev_warn(bp->dev, "failed to allocate debug register DMA memory\n");
2909
2910         return 0;
2911 }
2912
2913 static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
2914 {
2915         if (bp->hwrm_short_cmd_req_addr) {
2916                 struct pci_dev *pdev = bp->pdev;
2917
2918                 dma_free_coherent(&pdev->dev, BNXT_HWRM_MAX_REQ_LEN,
2919                                   bp->hwrm_short_cmd_req_addr,
2920                                   bp->hwrm_short_cmd_req_dma_addr);
2921                 bp->hwrm_short_cmd_req_addr = NULL;
2922         }
2923 }
2924
2925 static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
2926 {
2927         struct pci_dev *pdev = bp->pdev;
2928
2929         bp->hwrm_short_cmd_req_addr =
2930                 dma_alloc_coherent(&pdev->dev, BNXT_HWRM_MAX_REQ_LEN,
2931                                    &bp->hwrm_short_cmd_req_dma_addr,
2932                                    GFP_KERNEL);
2933         if (!bp->hwrm_short_cmd_req_addr)
2934                 return -ENOMEM;
2935
2936         return 0;
2937 }
2938
2939 static void bnxt_free_stats(struct bnxt *bp)
2940 {
2941         u32 size, i;
2942         struct pci_dev *pdev = bp->pdev;
2943
2944         if (bp->hw_rx_port_stats) {
2945                 dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
2946                                   bp->hw_rx_port_stats,
2947                                   bp->hw_rx_port_stats_map);
2948                 bp->hw_rx_port_stats = NULL;
2949                 bp->flags &= ~BNXT_FLAG_PORT_STATS;
2950         }
2951
2952         if (!bp->bnapi)
2953                 return;
2954
2955         size = sizeof(struct ctx_hw_stats);
2956
2957         for (i = 0; i < bp->cp_nr_rings; i++) {
2958                 struct bnxt_napi *bnapi = bp->bnapi[i];
2959                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2960
2961                 if (cpr->hw_stats) {
2962                         dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
2963                                           cpr->hw_stats_map);
2964                         cpr->hw_stats = NULL;
2965                 }
2966         }
2967 }
2968
2969 static int bnxt_alloc_stats(struct bnxt *bp)
2970 {
2971         u32 size, i;
2972         struct pci_dev *pdev = bp->pdev;
2973
2974         size = sizeof(struct ctx_hw_stats);
2975
2976         for (i = 0; i < bp->cp_nr_rings; i++) {
2977                 struct bnxt_napi *bnapi = bp->bnapi[i];
2978                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2979
2980                 cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
2981                                                    &cpr->hw_stats_map,
2982                                                    GFP_KERNEL);
2983                 if (!cpr->hw_stats)
2984                         return -ENOMEM;
2985
2986                 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
2987         }
2988
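        /* Port statistics (PF only) live in one DMA buffer laid out as
         * rx stats, a 512-byte gap, then tx stats; the extra 1024 bytes
         * cover the gap and trailing pad.
         */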
2989         if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) {
2990                 bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
2991                                          sizeof(struct tx_port_stats) + 1024;
2992
2993                 bp->hw_rx_port_stats =
2994                         dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
2995                                            &bp->hw_rx_port_stats_map,
2996                                            GFP_KERNEL);
2997                 if (!bp->hw_rx_port_stats)
2998                         return -ENOMEM;
2999
3000                 bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) +
3001                                        512;
3002                 bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
3003                                            sizeof(struct rx_port_stats) + 512;
3004                 bp->flags |= BNXT_FLAG_PORT_STATS;
3005         }
3006         return 0;
3007 }
3008
3009 static void bnxt_clear_ring_indices(struct bnxt *bp)
3010 {
3011         int i;
3012
3013         if (!bp->bnapi)
3014                 return;
3015
3016         for (i = 0; i < bp->cp_nr_rings; i++) {
3017                 struct bnxt_napi *bnapi = bp->bnapi[i];
3018                 struct bnxt_cp_ring_info *cpr;
3019                 struct bnxt_rx_ring_info *rxr;
3020                 struct bnxt_tx_ring_info *txr;
3021
3022                 if (!bnapi)
3023                         continue;
3024
3025                 cpr = &bnapi->cp_ring;
3026                 cpr->cp_raw_cons = 0;
3027
3028                 txr = bnapi->tx_ring;
3029                 if (txr) {
3030                         txr->tx_prod = 0;
3031                         txr->tx_cons = 0;
3032                 }
3033
3034                 rxr = bnapi->rx_ring;
3035                 if (rxr) {
3036                         rxr->rx_prod = 0;
3037                         rxr->rx_agg_prod = 0;
3038                         rxr->rx_sw_agg_prod = 0;
3039                         rxr->rx_next_cons = 0;
3040                 }
3041         }
3042 }
3043
3044 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
3045 {
3046 #ifdef CONFIG_RFS_ACCEL
3047         int i;
3048
3049         /* We are under rtnl_lock, and all our NAPIs have been disabled.
3050          * It is therefore safe to delete the hash table.
3051          */
3052         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
3053                 struct hlist_head *head;
3054                 struct hlist_node *tmp;
3055                 struct bnxt_ntuple_filter *fltr;
3056
3057                 head = &bp->ntp_fltr_hash_tbl[i];
3058                 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
3059                         hlist_del(&fltr->hash);
3060                         kfree(fltr);
3061                 }
3062         }
3063         if (irq_reinit) {
3064                 kfree(bp->ntp_fltr_bmap);
3065                 bp->ntp_fltr_bmap = NULL;
3066         }
3067         bp->ntp_fltr_count = 0;
3068 #endif
3069 }
3070
3071 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
3072 {
3073 #ifdef CONFIG_RFS_ACCEL
3074         int i, rc = 0;
3075
3076         if (!(bp->flags & BNXT_FLAG_RFS))
3077                 return 0;
3078
3079         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
3080                 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
3081
3082         bp->ntp_fltr_count = 0;
3083         bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
3084                                     sizeof(long),
3085                                     GFP_KERNEL);
3086
3087         if (!bp->ntp_fltr_bmap)
3088                 rc = -ENOMEM;
3089
3090         return rc;
3091 #else
3092         return 0;
3093 #endif
3094 }
3095
3096 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
3097 {
3098         bnxt_free_vnic_attributes(bp);
3099         bnxt_free_tx_rings(bp);
3100         bnxt_free_rx_rings(bp);
3101         bnxt_free_cp_rings(bp);
3102         bnxt_free_ntp_fltrs(bp, irq_re_init);
3103         if (irq_re_init) {
3104                 bnxt_free_stats(bp);
3105                 bnxt_free_ring_grps(bp);
3106                 bnxt_free_vnics(bp);
3107                 kfree(bp->tx_ring_map);
3108                 bp->tx_ring_map = NULL;
3109                 kfree(bp->tx_ring);
3110                 bp->tx_ring = NULL;
3111                 kfree(bp->rx_ring);
3112                 bp->rx_ring = NULL;
3113                 kfree(bp->bnapi);
3114                 bp->bnapi = NULL;
3115         } else {
3116                 bnxt_clear_ring_indices(bp);
3117         }
3118 }
3119
3120 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
3121 {
3122         int i, j, rc, size, arr_size;
3123         void *bnapi;
3124
3125         if (irq_re_init) {
3126                 /* Allocate the bnapi pointer array and the bnxt_napi
3127                  * structs for all queues in one contiguous block.
3128                  */
3129                 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
3130                                 bp->cp_nr_rings);
3131                 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
3132                 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
3133                 if (!bnapi)
3134                         return -ENOMEM;
3135
3136                 bp->bnapi = bnapi;
3137                 bnapi += arr_size;
3138                 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
3139                         bp->bnapi[i] = bnapi;
3140                         bp->bnapi[i]->index = i;
3141                         bp->bnapi[i]->bp = bp;
3142                 }
3143
3144                 bp->rx_ring = kcalloc(bp->rx_nr_rings,
3145                                       sizeof(struct bnxt_rx_ring_info),
3146                                       GFP_KERNEL);
3147                 if (!bp->rx_ring)
3148                         return -ENOMEM;
3149
3150                 for (i = 0; i < bp->rx_nr_rings; i++) {
3151                         bp->rx_ring[i].bnapi = bp->bnapi[i];
3152                         bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
3153                 }
3154
3155                 bp->tx_ring = kcalloc(bp->tx_nr_rings,
3156                                       sizeof(struct bnxt_tx_ring_info),
3157                                       GFP_KERNEL);
3158                 if (!bp->tx_ring)
3159                         return -ENOMEM;
3160
3161                 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
3162                                           GFP_KERNEL);
3163
3164                 if (!bp->tx_ring_map)
3165                         return -ENOMEM;
3166
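                /* With shared completion rings, tx ring i shares its
                 * bnapi with rx ring i; otherwise the tx rings use the
                 * bnapi entries that follow the rx rings.
                 */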
3167                 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
3168                         j = 0;
3169                 else
3170                         j = bp->rx_nr_rings;
3171
3172                 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
3173                         bp->tx_ring[i].bnapi = bp->bnapi[j];
3174                         bp->bnapi[j]->tx_ring = &bp->tx_ring[i];
3175                         bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
3176                         if (i >= bp->tx_nr_rings_xdp) {
3177                                 bp->tx_ring[i].txq_index = i -
3178                                         bp->tx_nr_rings_xdp;
3179                                 bp->bnapi[j]->tx_int = bnxt_tx_int;
3180                         } else {
3181                                 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
3182                                 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
3183                         }
3184                 }
3185
3186                 rc = bnxt_alloc_stats(bp);
3187                 if (rc)
3188                         goto alloc_mem_err;
3189
3190                 rc = bnxt_alloc_ntp_fltrs(bp);
3191                 if (rc)
3192                         goto alloc_mem_err;
3193
3194                 rc = bnxt_alloc_vnics(bp);
3195                 if (rc)
3196                         goto alloc_mem_err;
3197         }
3198
3199         bnxt_init_ring_struct(bp);
3200
3201         rc = bnxt_alloc_rx_rings(bp);
3202         if (rc)
3203                 goto alloc_mem_err;
3204
3205         rc = bnxt_alloc_tx_rings(bp);
3206         if (rc)
3207                 goto alloc_mem_err;
3208
3209         rc = bnxt_alloc_cp_rings(bp);
3210         if (rc)
3211                 goto alloc_mem_err;
3212
3213         bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
3214                                   BNXT_VNIC_UCAST_FLAG;
3215         rc = bnxt_alloc_vnic_attributes(bp);
3216         if (rc)
3217                 goto alloc_mem_err;
3218         return 0;
3219
3220 alloc_mem_err:
3221         bnxt_free_mem(bp, true);
3222         return rc;
3223 }
3224
3225 static void bnxt_disable_int(struct bnxt *bp)
3226 {
3227         int i;
3228
3229         if (!bp->bnapi)
3230                 return;
3231
3232         for (i = 0; i < bp->cp_nr_rings; i++) {
3233                 struct bnxt_napi *bnapi = bp->bnapi[i];
3234                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3235                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3236
3237                 if (ring->fw_ring_id != INVALID_HW_RING_ID)
3238                         BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
3239         }
3240 }
3241
3242 static void bnxt_disable_int_sync(struct bnxt *bp)
3243 {
3244         int i;
3245
3246         atomic_inc(&bp->intr_sem);
3247
3248         bnxt_disable_int(bp);
3249         for (i = 0; i < bp->cp_nr_rings; i++)
3250                 synchronize_irq(bp->irq_tbl[i].vector);
3251 }
3252
3253 static void bnxt_enable_int(struct bnxt *bp)
3254 {
3255         int i;
3256
3257         atomic_set(&bp->intr_sem, 0);
3258         for (i = 0; i < bp->cp_nr_rings; i++) {
3259                 struct bnxt_napi *bnapi = bp->bnapi[i];
3260                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3261
3262                 BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
3263         }
3264 }
3265
3266 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
3267                             u16 cmpl_ring, u16 target_id)
3268 {
3269         struct input *req = request;
3270
3271         req->req_type = cpu_to_le16(req_type);
3272         req->cmpl_ring = cpu_to_le16(cmpl_ring);
3273         req->target_id = cpu_to_le16(target_id);
3274         req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
3275 }
3276
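 /* Send a message on the HWRM channel: the request is copied into BAR0
  * (zero-padded to the maximum request length) and the doorbell at
  * offset 0x100 is rung.  Completion is detected either via the
  * completion ring interrupt (when cmpl_ring is valid) or by polling
  * the response length and valid bit in the DMA response buffer.
  * Caller must hold hwrm_cmd_lock.
  */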
3277 static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
3278                                  int timeout, bool silent)
3279 {
3280         int i, intr_process, rc, tmo_count;
3281         struct input *req = msg;
3282         u32 *data = msg;
3283         __le32 *resp_len, *valid;
3284         u16 cp_ring_id, len = 0;
3285         struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
3286         u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
3287
3288         req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++);
3289         memset(resp, 0, PAGE_SIZE);
3290         cp_ring_id = le16_to_cpu(req->cmpl_ring);
3291         intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
3292
3293         if (bp->flags & BNXT_FLAG_SHORT_CMD) {
3294                 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
3295                 struct hwrm_short_input short_input = {0};
3296
3297                 memcpy(short_cmd_req, req, msg_len);
3298                 memset(short_cmd_req + msg_len, 0, BNXT_HWRM_MAX_REQ_LEN -
3299                                                    msg_len);
3300
3301                 short_input.req_type = req->req_type;
3302                 short_input.signature =
3303                                 cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
3304                 short_input.size = cpu_to_le16(msg_len);
3305                 short_input.req_addr =
3306                         cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);
3307
3308                 data = (u32 *)&short_input;
3309                 msg_len = sizeof(short_input);
3310
3311                 /* Sync memory write before updating doorbell */
3312                 wmb();
3313
3314                 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
3315         }
3316
3317         /* Write request msg to hwrm channel */
3318         __iowrite32_copy(bp->bar0, data, msg_len / 4);
3319
3320         for (i = msg_len; i < max_req_len; i += 4)
3321                 writel(0, bp->bar0 + i);
3322
3323         /* currently supports only one outstanding message */
3324         if (intr_process)
3325                 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
3326
3327         /* Ring channel doorbell */
3328         writel(1, bp->bar0 + 0x100);
3329
3330         if (!timeout)
3331                 timeout = DFLT_HWRM_CMD_TIMEOUT;
3332
3333         i = 0;
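        /* timeout is in milliseconds; each poll below sleeps 25-40 us,
         * so budget roughly 40 polls per millisecond.
         */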
3334         tmo_count = timeout * 40;
3335         if (intr_process) {
3336                 /* Wait until hwrm response cmpl interrupt is processed */
3337                 while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
3338                        i++ < tmo_count) {
3339                         usleep_range(25, 40);
3340                 }
3341
3342                 if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
3343                         netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
3344                                    le16_to_cpu(req->req_type));
3345                         return -1;
3346                 }
3347         } else {
3348                 /* Check if response len is updated */
3349                 resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
3350                 for (i = 0; i < tmo_count; i++) {
3351                         len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
3352                               HWRM_RESP_LEN_SFT;
3353                         if (len)
3354                                 break;
3355                         usleep_range(25, 40);
3356                 }
3357
3358                 if (i >= tmo_count) {
3359                         netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
3360                                    timeout, le16_to_cpu(req->req_type),
3361                                    le16_to_cpu(req->seq_id), len);
3362                         return -1;
3363                 }
3364
3365                 /* Last word of resp contains valid bit */
3366                 valid = bp->hwrm_cmd_resp_addr + len - 4;
3367                 for (i = 0; i < 5; i++) {
3368                         if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK)
3369                                 break;
3370                         udelay(1);
3371                 }
3372
3373                 if (i >= 5) {
3374                         netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
3375                                    timeout, le16_to_cpu(req->req_type),
3376                                    le16_to_cpu(req->seq_id), len,
3377                                    le32_to_cpu(*valid));
3377                         return -1;
3378                 }
3379         }
3380
3381         rc = le16_to_cpu(resp->error_code);
3382         if (rc && !silent)
3383                 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
3384                            le16_to_cpu(resp->req_type),
3385                            le16_to_cpu(resp->seq_id), rc);
3386         return rc;
3387 }
3388
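 /* _hwrm_send_message() requires hwrm_cmd_lock to be held; callers that
  * read the response buffer must keep holding it until they are done.
  * hwrm_send_message() takes and drops the lock itself.
  */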
3389 int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
3390 {
3391         return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
3392 }
3393
3394 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
3395 {
3396         int rc;
3397
3398         mutex_lock(&bp->hwrm_cmd_lock);
3399         rc = _hwrm_send_message(bp, msg, msg_len, timeout);
3400         mutex_unlock(&bp->hwrm_cmd_lock);
3401         return rc;
3402 }
3403
3404 int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
3405                              int timeout)
3406 {
3407         int rc;
3408
3409         mutex_lock(&bp->hwrm_cmd_lock);
3410         rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
3411         mutex_unlock(&bp->hwrm_cmd_lock);
3412         return rc;
3413 }
3414
3415 int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
3416                                      int bmap_size)
3417 {
3418         struct hwrm_func_drv_rgtr_input req = {0};
3419         DECLARE_BITMAP(async_events_bmap, 256);
3420         u32 *events = (u32 *)async_events_bmap;
3421         int i;
3422
3423         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
3424
3425         req.enables =
3426                 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
3427
3428         memset(async_events_bmap, 0, sizeof(async_events_bmap));
3429         for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++)
3430                 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
3431
3432         if (bmap && bmap_size) {
3433                 for (i = 0; i < bmap_size; i++) {
3434                         if (test_bit(i, bmap))
3435                                 __set_bit(i, async_events_bmap);
3436                 }
3437         }
3438
3439         for (i = 0; i < 8; i++)
3440                 req.async_event_fwd[i] |= cpu_to_le32(events[i]);
3441
3442         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3443 }
3444
3445 static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
3446 {
3447         struct hwrm_func_drv_rgtr_input req = {0};
3448
3449         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
3450
3451         req.enables =
3452                 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
3453                             FUNC_DRV_RGTR_REQ_ENABLES_VER);
3454
3455         req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
3456         req.ver_maj = DRV_VER_MAJ;
3457         req.ver_min = DRV_VER_MIN;
3458         req.ver_upd = DRV_VER_UPD;
3459
3460         if (BNXT_PF(bp)) {
3461                 DECLARE_BITMAP(vf_req_snif_bmap, 256);
3462                 u32 *data = (u32 *)vf_req_snif_bmap;
3463                 int i;
3464
3465                 memset(vf_req_snif_bmap, 0, sizeof(vf_req_snif_bmap));
3466                 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++)
3467                         __set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap);
3468
3469                 for (i = 0; i < 8; i++)
3470                         req.vf_req_fwd[i] = cpu_to_le32(data[i]);
3471
3472                 req.enables |=
3473                         cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
3474         }
3475
3476         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3477 }
3478
3479 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
3480 {
3481         struct hwrm_func_drv_unrgtr_input req = {0};
3482
3483         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
3484         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3485 }
3486
3487 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
3488 {
3489         u32 rc = 0;
3490         struct hwrm_tunnel_dst_port_free_input req = {0};
3491
3492         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
3493         req.tunnel_type = tunnel_type;
3494
3495         switch (tunnel_type) {
3496         case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
3497                 req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
3498                 break;
3499         case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
3500                 req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
3501                 break;
3502         default:
3503                 break;
3504         }
3505
3506         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3507         if (rc)
3508                 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
3509                            rc);
3510         return rc;
3511 }
3512
3513 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
3514                                            u8 tunnel_type)
3515 {
3516         u32 rc = 0;
3517         struct hwrm_tunnel_dst_port_alloc_input req = {0};
3518         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3519
3520         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
3521
3522         req.tunnel_type = tunnel_type;
3523         req.tunnel_dst_port_val = port;
3524
3525         mutex_lock(&bp->hwrm_cmd_lock);
3526         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3527         if (rc) {
3528                 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
3529                            rc);
3530                 goto err_out;
3531         }
3532
3533         switch (tunnel_type) {
3534         case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
3535                 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
3536                 break;
3537         case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
3538                 bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
3539                 break;
3540         default:
3541                 break;
3542         }
3543
3544 err_out:
3545         mutex_unlock(&bp->hwrm_cmd_lock);
3546         return rc;
3547 }
3548
3549 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
3550 {
3551         struct hwrm_cfa_l2_set_rx_mask_input req = {0};
3552         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3553
3554         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
3555         req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
3556
3557         req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
3558         req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
3559         req.mask = cpu_to_le32(vnic->rx_mask);
3560         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3561 }
3562
3563 #ifdef CONFIG_RFS_ACCEL
3564 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
3565                                             struct bnxt_ntuple_filter *fltr)
3566 {
3567         struct hwrm_cfa_ntuple_filter_free_input req = {0};
3568
3569         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
3570         req.ntuple_filter_id = fltr->filter_id;
3571         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3572 }
3573
3574 #define BNXT_NTP_FLTR_FLAGS                                     \
3575         (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |     \
3576          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |        \
3577          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR |      \
3578          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |      \
3579          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |       \
3580          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |  \
3581          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |       \
3582          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |  \
3583          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |      \
3584          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |         \
3585          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |    \
3586          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |         \
3587          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |    \
3588          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
3589
3590 #define BNXT_NTP_TUNNEL_FLTR_FLAG                               \
3591                 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
3592
3593 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
3594                                              struct bnxt_ntuple_filter *fltr)
3595 {
3596         int rc = 0;
3597         struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
3598         struct hwrm_cfa_ntuple_filter_alloc_output *resp =
3599                 bp->hwrm_cmd_resp_addr;
3600         struct flow_keys *keys = &fltr->fkeys;
3601         struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
3602
3603         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
3604         req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
3605
3606         req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
3607
3608         req.ethertype = htons(ETH_P_IP);
3609         memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
3610         req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
3611         req.ip_protocol = keys->basic.ip_proto;
3612
3613         if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
3614                 int i;
3615
3616                 req.ethertype = htons(ETH_P_IPV6);
3617                 req.ip_addr_type =
3618                         CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
3619                 *(struct in6_addr *)&req.src_ipaddr[0] =
3620                         keys->addrs.v6addrs.src;
3621                 *(struct in6_addr *)&req.dst_ipaddr[0] =
3622                         keys->addrs.v6addrs.dst;
3623                 for (i = 0; i < 4; i++) {
3624                         req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
3625                         req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
3626                 }
3627         } else {
3628                 req.src_ipaddr[0] = keys->addrs.v4addrs.src;
3629                 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
3630                 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
3631                 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
3632         }
3633         if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
3634                 req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
3635                 req.tunnel_type =
3636                         CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
3637         }
3638
3639         req.src_port = keys->ports.src;
3640         req.src_port_mask = cpu_to_be16(0xffff);
3641         req.dst_port = keys->ports.dst;
3642         req.dst_port_mask = cpu_to_be16(0xffff);
3643
3644         req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
3645         mutex_lock(&bp->hwrm_cmd_lock);
3646         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3647         if (!rc)
3648                 fltr->filter_id = resp->ntuple_filter_id;
3649         mutex_unlock(&bp->hwrm_cmd_lock);
3650         return rc;
3651 }
3652 #endif
3653
3654 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
3655                                      u8 *mac_addr)
3656 {
3657         u32 rc = 0;
3658         struct hwrm_cfa_l2_filter_alloc_input req = {0};
3659         struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3660
3661         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
3662         req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
3663         if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
3664                 req.flags |=
3665                         cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
3666         req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
3667         req.enables =
3668                 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
3669                             CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
3670                             CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
3671         memcpy(req.l2_addr, mac_addr, ETH_ALEN);
3672         req.l2_addr_mask[0] = 0xff;
3673         req.l2_addr_mask[1] = 0xff;
3674         req.l2_addr_mask[2] = 0xff;
3675         req.l2_addr_mask[3] = 0xff;
3676         req.l2_addr_mask[4] = 0xff;
3677         req.l2_addr_mask[5] = 0xff;
3678
3679         mutex_lock(&bp->hwrm_cmd_lock);
3680         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3681         if (!rc)
3682                 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
3683                                                         resp->l2_filter_id;
3684         mutex_unlock(&bp->hwrm_cmd_lock);
3685         return rc;
3686 }
3687
3688 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
3689 {
3690         u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
3691         int rc = 0;
3692
3693         /* Any associated ntuple filters will also be cleared by firmware. */
3694         mutex_lock(&bp->hwrm_cmd_lock);
3695         for (i = 0; i < num_of_vnics; i++) {
3696                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3697
3698                 for (j = 0; j < vnic->uc_filter_count; j++) {
3699                         struct hwrm_cfa_l2_filter_free_input req = {0};
3700
3701                         bnxt_hwrm_cmd_hdr_init(bp, &req,
3702                                                HWRM_CFA_L2_FILTER_FREE, -1, -1);
3703
3704                         req.l2_filter_id = vnic->fw_l2_filter_id[j];
3705
3706                         rc = _hwrm_send_message(bp, &req, sizeof(req),
3707                                                 HWRM_CMD_TIMEOUT);
3708                 }
3709                 vnic->uc_filter_count = 0;
3710         }
3711         mutex_unlock(&bp->hwrm_cmd_lock);
3712
3713         return rc;
3714 }
3715
3716 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
3717 {
3718         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3719         struct hwrm_vnic_tpa_cfg_input req = {0};
3720
3721         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
3722
3723         if (tpa_flags) {
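                /* Estimate the MSS as the MTU minus 40 bytes of IPv4
                 * and TCP headers.
                 */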
3724                 u16 mss = bp->dev->mtu - 40;
3725                 u32 nsegs, n, segs = 0, flags;
3726
3727                 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
3728                         VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
3729                         VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
3730                         VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
3731                         VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
3732                 if (tpa_flags & BNXT_FLAG_GRO)
3733                         flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
3734
3735                 req.flags = cpu_to_le32(flags);
3736
3737                 req.enables =
3738                         cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
3739                                     VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
3740                                     VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
3741
3742                 /* The number of aggregation segments is in log2 units,
3743                  * and the first packet is not counted in these units.
3744                  */
3745                 if (mss <= BNXT_RX_PAGE_SIZE) {
3746                         n = BNXT_RX_PAGE_SIZE / mss;
3747                         nsegs = (MAX_SKB_FRAGS - 1) * n;
3748                 } else {
3749                         n = mss / BNXT_RX_PAGE_SIZE;
3750                         if (mss & (BNXT_RX_PAGE_SIZE - 1))
3751                                 n++;
3752                         nsegs = (MAX_SKB_FRAGS - n) / n;
3753                 }
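                /* A worked example (assuming 4K rx pages and
                 * MAX_SKB_FRAGS == 17): mss = 1460 gives
                 * n = 4096 / 1460 = 2, nsegs = (17 - 1) * 2 = 32, so
                 * segs = ilog2(32) = 5.
                 */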
3754
3755                 segs = ilog2(nsegs);
3756                 req.max_agg_segs = cpu_to_le16(segs);
3757                 req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);
3758
3759                 req.min_agg_len = cpu_to_le32(512);
3760         }
3761         req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
3762
3763         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3764 }
3765
3766 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
3767 {
3768         u32 i, j, max_rings;
3769         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3770         struct hwrm_vnic_rss_cfg_input req = {0};
3771
3772         if (vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
3773                 return 0;
3774
3775         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
3776         if (set_rss) {
3777                 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
3778                 if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
3779                         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3780                                 max_rings = bp->rx_nr_rings - 1;
3781                         else
3782                                 max_rings = bp->rx_nr_rings;
3783                 } else {
3784                         max_rings = 1;
3785                 }
3786
3787                 /* Fill the RSS indirection table with ring group ids */
3788                 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
3789                         if (j == max_rings)
3790                                 j = 0;
3791                         vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
3792                 }
3793
3794                 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
3795                 req.hash_key_tbl_addr =
3796                         cpu_to_le64(vnic->rss_hash_key_dma_addr);
3797         }
3798         req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
3799         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3800 }
3801
3802 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
3803 {
3804         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3805         struct hwrm_vnic_plcmodes_cfg_input req = {0};
3806
3807         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
3808         req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
3809                                 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
3810                                 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
3811         req.enables =
3812                 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
3813                             VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
3814         /* thresholds not implemented in firmware yet */
3815         req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
3816         req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
3817         req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
3818         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3819 }
3820
3821 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
3822                                         u16 ctx_idx)
3823 {
3824         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
3825
3826         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
3827         req.rss_cos_lb_ctx_id =
3828                 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
3829
3830         hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3831         bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
3832 }
3833
3834 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
3835 {
3836         int i, j;
3837
3838         for (i = 0; i < bp->nr_vnics; i++) {
3839                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3840
3841                 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
3842                         if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
3843                                 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
3844                 }
3845         }
3846         bp->rsscos_nr_ctxs = 0;
3847 }
3848
3849 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
3850 {
3851         int rc;
3852         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
3853         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
3854                                                 bp->hwrm_cmd_resp_addr;
3855
3856         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
3857                                -1);
3858
3859         mutex_lock(&bp->hwrm_cmd_lock);
3860         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3861         if (!rc)
3862                 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
3863                         le16_to_cpu(resp->rss_cos_lb_ctx_id);
3864         mutex_unlock(&bp->hwrm_cmd_lock);
3865
3866         return rc;
3867 }
3868
3869 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
3870 {
3871         unsigned int ring = 0, grp_idx;
3872         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3873         struct hwrm_vnic_cfg_input req = {0};
3874         u16 def_vlan = 0;
3875
3876         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
3877
3878         req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
3879         /* Only RSS is supported for now; COS & LB are TBD. */
3880         if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
3881                 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
3882                 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
3883                                            VNIC_CFG_REQ_ENABLES_MRU);
3884         } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
3885                 req.rss_rule =
3886                         cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
3887                 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
3888                                            VNIC_CFG_REQ_ENABLES_MRU);
3889                 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
3890         } else {
3891                 req.rss_rule = cpu_to_le16(0xffff);
3892         }
3893
3894         if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
3895             (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
3896                 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
3897                 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
3898         } else {
3899                 req.cos_rule = cpu_to_le16(0xffff);
3900         }
3901
3902         if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3903                 ring = 0;
3904         else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
3905                 ring = vnic_id - 1;
3906         else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
3907                 ring = bp->rx_nr_rings - 1;
3908
3909         grp_idx = bp->rx_ring[ring].bnapi->index;
3910         req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
3911         req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
3912
3913         req.lb_rule = cpu_to_le16(0xffff);
3914         req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
3915                               VLAN_HLEN);
3916
3917 #ifdef CONFIG_BNXT_SRIOV
3918         if (BNXT_VF(bp))
3919                 def_vlan = bp->vf.vlan;
3920 #endif
3921         if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
3922                 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
3923         if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
3924                 req.flags |=
3925                         cpu_to_le32(VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE);
3926
3927         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3928 }
3929
3930 static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
3931 {
3932         u32 rc = 0;
3933
3934         if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
3935                 struct hwrm_vnic_free_input req = {0};
3936
3937                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
3938                 req.vnic_id =
3939                         cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
3940
3941                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3942                 if (rc)
3943                         return rc;
3944                 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
3945         }
3946         return rc;
3947 }
3948
3949 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
3950 {
3951         u16 i;
3952
3953         for (i = 0; i < bp->nr_vnics; i++)
3954                 bnxt_hwrm_vnic_free_one(bp, i);
3955 }
3956
3957 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
3958                                 unsigned int start_rx_ring_idx,
3959                                 unsigned int nr_rings)
3960 {
3961         int rc = 0;
3962         unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
3963         struct hwrm_vnic_alloc_input req = {0};
3964         struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3965
3966         /* map ring groups to this vnic */
3967         for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
3968                 grp_idx = bp->rx_ring[i].bnapi->index;
3969                 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
3970                         netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
3971                                    j, nr_rings);
3972                         break;
3973                 }
3974                 bp->vnic_info[vnic_id].fw_grp_ids[j] =
3975                                         bp->grp_info[grp_idx].fw_grp_id;
3976         }
3977
3978         bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
3979         bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
3980         if (vnic_id == 0)
3981                 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
3982
3983         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
3984
3985         mutex_lock(&bp->hwrm_cmd_lock);
3986         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3987         if (!rc)
3988                 bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id);
3989         mutex_unlock(&bp->hwrm_cmd_lock);
3990         return rc;
3991 }
3992
3993 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
3994 {
3995         struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3996         struct hwrm_vnic_qcaps_input req = {0};
3997         int rc;
3998
3999         if (bp->hwrm_spec_code < 0x10600)
4000                 return 0;
4001
4002         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
4003         mutex_lock(&bp->hwrm_cmd_lock);
4004         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4005         if (!rc) {
4006                 if (resp->flags &
4007                     cpu_to_le32(VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
4008                         bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
4009         }
4010         mutex_unlock(&bp->hwrm_cmd_lock);
4011         return rc;
4012 }
4013
4014 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
4015 {
4016         u16 i;
4017         u32 rc = 0;
4018
4019         mutex_lock(&bp->hwrm_cmd_lock);
4020         for (i = 0; i < bp->rx_nr_rings; i++) {
4021                 struct hwrm_ring_grp_alloc_input req = {0};
4022                 struct hwrm_ring_grp_alloc_output *resp =
4023                                         bp->hwrm_cmd_resp_addr;
4024                 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
4025
4026                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
4027
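                /* cr/rr/ar/sc: the completion ring, rx ring, aggregation
                 * ring and stats context that make up this ring group.
                 */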
4028                 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
4029                 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
4030                 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
4031                 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
4032
4033                 rc = _hwrm_send_message(bp, &req, sizeof(req),
4034                                         HWRM_CMD_TIMEOUT);
4035                 if (rc)
4036                         break;
4037
4038                 bp->grp_info[grp_idx].fw_grp_id =
4039                         le32_to_cpu(resp->ring_group_id);
4040         }
4041         mutex_unlock(&bp->hwrm_cmd_lock);
4042         return rc;
4043 }
4044
4045 static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
4046 {
4047         u16 i;
4048         u32 rc = 0;
4049         struct hwrm_ring_grp_free_input req = {0};
4050
4051         if (!bp->grp_info)
4052                 return 0;
4053
4054         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
4055
4056         mutex_lock(&bp->hwrm_cmd_lock);
4057         for (i = 0; i < bp->cp_nr_rings; i++) {
4058                 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
4059                         continue;
4060                 req.ring_group_id =
4061                         cpu_to_le32(bp->grp_info[i].fw_grp_id);
4062
4063                 rc = _hwrm_send_message(bp, &req, sizeof(req),
4064                                         HWRM_CMD_TIMEOUT);
4065                 if (rc)
4066                         break;
4067                 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
4068         }
4069         mutex_unlock(&bp->hwrm_cmd_lock);
4070         return rc;
4071 }
4072
4073 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
4074                                     struct bnxt_ring_struct *ring,
4075                                     u32 ring_type, u32 map_index,
4076                                     u32 stats_ctx_id)
4077 {
4078         int rc = 0, err = 0;
4079         struct hwrm_ring_alloc_input req = {0};
4080         struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4081         u16 ring_id;
4082
4083         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
4084
4085         req.enables = 0;
4086         if (ring->nr_pages > 1) {
4087                 req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map);
4088                 /* Page size is in log2 units */
4089                 req.page_size = BNXT_PAGE_SHIFT;
4090                 req.page_tbl_depth = 1;
4091         } else {
4092                 req.page_tbl_addr = cpu_to_le64(ring->dma_arr[0]);
4093         }
4094         req.fbo = 0;
4095         /* Association of ring index with doorbell index and MSIX number */
4096         req.logical_id = cpu_to_le16(map_index);
4097
4098         switch (ring_type) {
4099         case HWRM_RING_ALLOC_TX:
4100                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
4101                 /* Association of transmit ring with completion ring */
4102                 req.cmpl_ring_id =
4103                         cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id);
4104                 req.length = cpu_to_le32(bp->tx_ring_mask + 1);
4105                 req.stat_ctx_id = cpu_to_le32(stats_ctx_id);
4106                 req.queue_id = cpu_to_le16(ring->queue_id);
4107                 break;
4108         case HWRM_RING_ALLOC_RX:
4109                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
4110                 req.length = cpu_to_le32(bp->rx_ring_mask + 1);
4111                 break;
4112         case HWRM_RING_ALLOC_AGG:
4113                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
4114                 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
4115                 break;
4116         case HWRM_RING_ALLOC_CMPL:
4117                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
4118                 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
4119                 if (bp->flags & BNXT_FLAG_USING_MSIX)
4120                         req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
4121                 break;
4122         default:
4123                 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
4124                            ring_type);
4125                 return -1;
4126         }
4127
4128         mutex_lock(&bp->hwrm_cmd_lock);
4129         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4130         err = le16_to_cpu(resp->error_code);
4131         ring_id = le16_to_cpu(resp->ring_id);
4132         mutex_unlock(&bp->hwrm_cmd_lock);
4133
4134         if (rc || err) {
4135                 switch (ring_type) {
4136                 case RING_FREE_REQ_RING_TYPE_L2_CMPL:
4137                         netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n",
4138                                    rc, err);
4139                         return -1;
4140
4141                 case RING_FREE_REQ_RING_TYPE_RX:
4142                         netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n",
4143                                    rc, err);
4144                         return -1;
4145
4146                 case RING_FREE_REQ_RING_TYPE_TX:
4147                         netdev_err(bp->dev, "hwrm_ring_alloc tx failed. rc:%x err:%x\n",
4148                                    rc, err);
4149                         return -1;
4150
4151                 default:
4152                         netdev_err(bp->dev, "Invalid ring\n");
4153                         return -1;
4154                 }
4155         }
4156         ring->fw_ring_id = ring_id;
4157         return rc;
4158 }
4159
4160 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
4161 {
4162         int rc;
4163
4164         if (BNXT_PF(bp)) {
4165                 struct hwrm_func_cfg_input req = {0};
4166
4167                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
4168                 req.fid = cpu_to_le16(0xffff);
4169                 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
4170                 req.async_event_cr = cpu_to_le16(idx);
4171                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4172         } else {
4173                 struct hwrm_func_vf_cfg_input req = {0};
4174
4175                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
4176                 req.enables =
4177                         cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
4178                 req.async_event_cr = cpu_to_le16(idx);
4179                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4180         }
4181         return rc;
4182 }
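
/* Note (sketch): bnxt_hwrm_ring_alloc() below nominates the first completion
 * ring (index 0) to receive firmware async event notifications; a PF programs
 * it via HWRM_FUNC_CFG and a VF via HWRM_FUNC_VF_CFG, as handled above.
 */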
4183
4184 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
4185 {
4186         int i, rc = 0;
4187
4188         for (i = 0; i < bp->cp_nr_rings; i++) {
4189                 struct bnxt_napi *bnapi = bp->bnapi[i];
4190                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4191                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4192
4193                 cpr->cp_doorbell = bp->bar1 + i * 0x80;
4194                 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i,
4195                                               INVALID_STATS_CTX_ID);
4196                 if (rc)
4197                         goto err_out;
4198                 BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
4199                 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
4200
4201                 if (!i) {
4202                         rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
4203                         if (rc)
4204                                 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
4205                 }
4206         }
4207
4208         for (i = 0; i < bp->tx_nr_rings; i++) {
4209                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4210                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
4211                 u32 map_idx = txr->bnapi->index;
4212                 u16 fw_stats_ctx = bp->grp_info[map_idx].fw_stats_ctx;
4213
4214                 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX,
4215                                               map_idx, fw_stats_ctx);
4216                 if (rc)
4217                         goto err_out;
4218                 txr->tx_doorbell = bp->bar1 + map_idx * 0x80;
4219         }
4220
4221         for (i = 0; i < bp->rx_nr_rings; i++) {
4222                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4223                 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
4224                 u32 map_idx = rxr->bnapi->index;
4225
4226                 rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX,
4227                                               map_idx, INVALID_STATS_CTX_ID);
4228                 if (rc)
4229                         goto err_out;
4230                 rxr->rx_doorbell = bp->bar1 + map_idx * 0x80;
4231                 writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
4232                 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
4233         }
4234
4235         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
4236                 for (i = 0; i < bp->rx_nr_rings; i++) {
4237                         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4238                         struct bnxt_ring_struct *ring =
4239                                                 &rxr->rx_agg_ring_struct;
4240                         u32 grp_idx = rxr->bnapi->index;
4241                         u32 map_idx = grp_idx + bp->rx_nr_rings;
4242
4243                         rc = hwrm_ring_alloc_send_msg(bp, ring,
4244                                                       HWRM_RING_ALLOC_AGG,
4245                                                       map_idx,
4246                                                       INVALID_STATS_CTX_ID);
4247                         if (rc)
4248                                 goto err_out;
4249
4250                         rxr->rx_agg_doorbell = bp->bar1 + map_idx * 0x80;
4251                         writel(DB_KEY_RX | rxr->rx_agg_prod,
4252                                rxr->rx_agg_doorbell);
4253                         bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
4254                 }
4255         }
4256 err_out:
4257         return rc;
4258 }
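
/* A minimal sketch (helper name hypothetical) of the doorbell layout assumed
 * in bnxt_hwrm_ring_alloc() above: BAR1 exposes one doorbell per ring at a
 * fixed 0x80-byte stride, so ring index N is rung at bar1 + N * 0x80.
 */
static inline void __iomem *bnxt_example_db_addr(struct bnxt *bp, u32 map_idx)
{
	return bp->bar1 + map_idx * 0x80;	/* same math as the cp/tx/rx/agg cases */
}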
4259
4260 static int hwrm_ring_free_send_msg(struct bnxt *bp,
4261                                    struct bnxt_ring_struct *ring,
4262                                    u32 ring_type, int cmpl_ring_id)
4263 {
4264         int rc;
4265         struct hwrm_ring_free_input req = {0};
4266         struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
4267         u16 error_code;
4268
4269         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
4270         req.ring_type = ring_type;
4271         req.ring_id = cpu_to_le16(ring->fw_ring_id);
4272
4273         mutex_lock(&bp->hwrm_cmd_lock);
4274         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4275         error_code = le16_to_cpu(resp->error_code);
4276         mutex_unlock(&bp->hwrm_cmd_lock);
4277
4278         if (rc || error_code) {
4279                 switch (ring_type) {
4280                 case RING_FREE_REQ_RING_TYPE_L2_CMPL:
4281                         netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n",
4282                                    rc);
4283                         return rc;
4284                 case RING_FREE_REQ_RING_TYPE_RX:
4285                         netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n",
4286                                    rc);
4287                         return rc;
4288                 case RING_FREE_REQ_RING_TYPE_TX:
4289                         netdev_err(bp->dev, "hwrm_ring_free tx failed. rc:%d\n",
4290                                    rc);
4291                         return rc;
4292                 default:
4293                         netdev_err(bp->dev, "Invalid ring\n");
4294                         return -1;
4295                 }
4296         }
4297         return 0;
4298 }
4299
4300 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
4301 {
4302         int i;
4303
4304         if (!bp->bnapi)
4305                 return;
4306
4307         for (i = 0; i < bp->tx_nr_rings; i++) {
4308                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4309                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
4310                 u32 grp_idx = txr->bnapi->index;
4311                 u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
4312
4313                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
4314                         hwrm_ring_free_send_msg(bp, ring,
4315                                                 RING_FREE_REQ_RING_TYPE_TX,
4316                                                 close_path ? cmpl_ring_id :
4317                                                 INVALID_HW_RING_ID);
4318                         ring->fw_ring_id = INVALID_HW_RING_ID;
4319                 }
4320         }
4321
4322         for (i = 0; i < bp->rx_nr_rings; i++) {
4323                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4324                 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
4325                 u32 grp_idx = rxr->bnapi->index;
4326                 u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
4327
4328                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
4329                         hwrm_ring_free_send_msg(bp, ring,
4330                                                 RING_FREE_REQ_RING_TYPE_RX,
4331                                                 close_path ? cmpl_ring_id :
4332                                                 INVALID_HW_RING_ID);
4333                         ring->fw_ring_id = INVALID_HW_RING_ID;
4334                         bp->grp_info[grp_idx].rx_fw_ring_id =
4335                                 INVALID_HW_RING_ID;
4336                 }
4337         }
4338
4339         for (i = 0; i < bp->rx_nr_rings; i++) {
4340                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4341                 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
4342                 u32 grp_idx = rxr->bnapi->index;
4343                 u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
4344
4345                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
4346                         hwrm_ring_free_send_msg(bp, ring,
4347                                                 RING_FREE_REQ_RING_TYPE_RX,
4348                                                 close_path ? cmpl_ring_id :
4349                                                 INVALID_HW_RING_ID);
4350                         ring->fw_ring_id = INVALID_HW_RING_ID;
4351                         bp->grp_info[grp_idx].agg_fw_ring_id =
4352                                 INVALID_HW_RING_ID;
4353                 }
4354         }
4355
4356         /* The completion rings are about to be freed.  After that,
4357          * the IRQ doorbell will no longer work, so we need to
4358          * disable the IRQ here.
4359          */
4360         bnxt_disable_int_sync(bp);
4361
4362         for (i = 0; i < bp->cp_nr_rings; i++) {
4363                 struct bnxt_napi *bnapi = bp->bnapi[i];
4364                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4365                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4366
4367                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
4368                         hwrm_ring_free_send_msg(bp, ring,
4369                                                 RING_FREE_REQ_RING_TYPE_L2_CMPL,
4370                                                 INVALID_HW_RING_ID);
4371                         ring->fw_ring_id = INVALID_HW_RING_ID;
4372                         bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
4373                 }
4374         }
4375 }
4376
4377 /* Caller must hold bp->hwrm_cmd_lock */
4378 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
4379 {
4380         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
4381         struct hwrm_func_qcfg_input req = {0};
4382         int rc;
4383
4384         if (bp->hwrm_spec_code < 0x10601)
4385                 return 0;
4386
4387         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
4388         req.fid = cpu_to_le16(fid);
4389         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4390         if (!rc)
4391                 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
4392
4393         return rc;
4394 }
4395
4396 static int bnxt_hwrm_reserve_tx_rings(struct bnxt *bp, int *tx_rings)
4397 {
4398         struct hwrm_func_cfg_input req = {0};
4399         int rc;
4400
4401         if (bp->hwrm_spec_code < 0x10601)
4402                 return 0;
4403
4404         if (BNXT_VF(bp))
4405                 return 0;
4406
4407         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
4408         req.fid = cpu_to_le16(0xffff);
4409         req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS);
4410         req.num_tx_rings = cpu_to_le16(*tx_rings);
4411         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4412         if (rc)
4413                 return rc;
4414
4415         mutex_lock(&bp->hwrm_cmd_lock);
4416         rc = __bnxt_hwrm_get_tx_rings(bp, 0xffff, tx_rings);
4417         mutex_unlock(&bp->hwrm_cmd_lock);
4418         return rc;
4419 }
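
/* Usage sketch (the requested count of 8 is hypothetical): on HWRM 1.6.1+,
 * reserving TX rings is a two-step handshake, program the requested count
 * with HWRM_FUNC_CFG, then read back with HWRM_FUNC_QCFG how many rings the
 * firmware actually granted.
 */
static inline int bnxt_example_reserve_tx(struct bnxt *bp)
{
	int tx_rings = 8;	/* hypothetical request */
	int rc = bnxt_hwrm_reserve_tx_rings(bp, &tx_rings);

	/* on success, tx_rings now holds the granted count, which may be
	 * smaller than the 8 requested
	 */
	return rc;
}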
4420
4421 static void bnxt_hwrm_set_coal_params(struct bnxt *bp, u32 max_bufs,
4422         u32 buf_tmrs, u16 flags,
4423         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
4424 {
4425         req->flags = cpu_to_le16(flags);
4426         req->num_cmpl_dma_aggr = cpu_to_le16((u16)max_bufs);
4427         req->num_cmpl_dma_aggr_during_int = cpu_to_le16(max_bufs >> 16);
4428         req->cmpl_aggr_dma_tmr = cpu_to_le16((u16)buf_tmrs);
4429         req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmrs >> 16);
4430         /* Minimum time between 2 interrupts set to buf_tmr x 2 */
4431         req->int_lat_tmr_min = cpu_to_le16((u16)buf_tmrs * 2);
4432         req->int_lat_tmr_max = cpu_to_le16((u16)buf_tmrs * 4);
4433         req->num_cmpl_aggr_int = cpu_to_le16((u16)max_bufs * 4);
4434 }
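
/* A minimal sketch (helper name hypothetical) of the argument packing that
 * bnxt_hwrm_set_coal() below relies on: each u32 carries the normal value in
 * bits 0..15 and the "during interrupt" value in bits 16..31, which
 * bnxt_hwrm_set_coal_params() above unpacks with a cast and a shift.
 */
static inline u32 bnxt_example_pack_coal(u16 normal, u16 during_irq)
{
	return (u32)during_irq << 16 | normal;
}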
4435
4436 int bnxt_hwrm_set_coal(struct bnxt *bp)
4437 {
4438         int i, rc = 0;
4439         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
4440                                                            req_tx = {0}, *req;
4441         u16 max_buf, max_buf_irq;
4442         u16 buf_tmr, buf_tmr_irq;
4443         u32 flags;
4444
4445         bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
4446                                HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
4447         bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
4448                                HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
4449
4450         /* Each rx completion (2 records) should be DMAed immediately.
4451          * DMA 1/4 of the completion buffers at a time.
4452          */
4453         max_buf = min_t(u16, bp->rx_coal_bufs / 4, 2);
4454         /* max_buf must not be zero */
4455         max_buf = clamp_t(u16, max_buf, 1, 63);
4456         max_buf_irq = clamp_t(u16, bp->rx_coal_bufs_irq, 1, 63);
4457         buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks);
4458         /* buf timer set to 1/4 of interrupt timer */
4459         buf_tmr = max_t(u16, buf_tmr / 4, 1);
4460         buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks_irq);
4461         buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);
4462
4463         flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
4464
4465         /* RING_IDLE generates more IRQs for lower latency.  Enable it only
4466          * if coal_ticks is less than 25 us.
4467          */
4468         if (bp->rx_coal_ticks < 25)
4469                 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
4470
4471         bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
4472                                   buf_tmr_irq << 16 | buf_tmr, flags, &req_rx);
4473
4474         /* max_buf must not be zero */
4475         max_buf = clamp_t(u16, bp->tx_coal_bufs, 1, 63);
4476         max_buf_irq = clamp_t(u16, bp->tx_coal_bufs_irq, 1, 63);
4477         buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks);
4478         /* buf timer set to 1/4 of interrupt timer */
4479         buf_tmr = max_t(u16, buf_tmr / 4, 1);
4480         buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks_irq);
4481         buf_tmr_irq = max_t(u16, buf_tmr_irq, 1);
4482
4483         flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
4484         bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf,
4485                                   buf_tmr_irq << 16 | buf_tmr, flags, &req_tx);
4486
4487         mutex_lock(&bp->hwrm_cmd_lock);
4488         for (i = 0; i < bp->cp_nr_rings; i++) {
4489                 struct bnxt_napi *bnapi = bp->bnapi[i];
4490
4491                 req = &req_rx;
4492                 if (!bnapi->rx_ring)
4493                         req = &req_tx;
4494                 req->ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
4495
4496                 rc = _hwrm_send_message(bp, req, sizeof(*req),
4497                                         HWRM_CMD_TIMEOUT);
4498                 if (rc)
4499                         break;
4500         }
4501         mutex_unlock(&bp->hwrm_cmd_lock);
4502         return rc;
4503 }
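
/* Note on the loop above: every completion ring is programmed with the
 * RX-tuned template unless its bnapi has no rx_ring (i.e. a TX-only
 * completion ring when rings are not shared), in which case the TX-tuned
 * template is sent instead.
 */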
4504
4505 static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
4506 {
4507         int rc = 0, i;
4508         struct hwrm_stat_ctx_free_input req = {0};
4509
4510         if (!bp->bnapi)
4511                 return 0;
4512
4513         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4514                 return 0;
4515
4516         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
4517
4518         mutex_lock(&bp->hwrm_cmd_lock);
4519         for (i = 0; i < bp->cp_nr_rings; i++) {
4520                 struct bnxt_napi *bnapi = bp->bnapi[i];
4521                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4522
4523                 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
4524                         req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
4525
4526                         rc = _hwrm_send_message(bp, &req, sizeof(req),
4527                                                 HWRM_CMD_TIMEOUT);
4528                         if (rc)
4529                                 break;
4530
4531                         cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
4532                 }
4533         }
4534         mutex_unlock(&bp->hwrm_cmd_lock);
4535         return rc;
4536 }
4537
4538 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
4539 {
4540         int rc = 0, i;
4541         struct hwrm_stat_ctx_alloc_input req = {0};
4542         struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4543
4544         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4545                 return 0;
4546
4547         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
4548
4549         req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
4550
4551         mutex_lock(&bp->hwrm_cmd_lock);
4552         for (i = 0; i < bp->cp_nr_rings; i++) {
4553                 struct bnxt_napi *bnapi = bp->bnapi[i];
4554                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4555
4556                 req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
4557
4558                 rc = _hwrm_send_message(bp, &req, sizeof(req),
4559                                         HWRM_CMD_TIMEOUT);
4560                 if (rc)
4561                         break;
4562
4563                 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
4564
4565                 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
4566         }
4567         mutex_unlock(&bp->hwrm_cmd_lock);
4568         return rc;
4569 }
4570
4571 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
4572 {
4573         struct hwrm_func_qcfg_input req = {0};
4574         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
4575         int rc;
4576
4577         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
4578         req.fid = cpu_to_le16(0xffff);
4579         mutex_lock(&bp->hwrm_cmd_lock);
4580         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4581         if (rc)
4582                 goto func_qcfg_exit;
4583
4584 #ifdef CONFIG_BNXT_SRIOV
4585         if (BNXT_VF(bp)) {
4586                 struct bnxt_vf_info *vf = &bp->vf;
4587
4588                 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
4589         }
4590 #endif
4591         if (BNXT_PF(bp)) {
4592                 u16 flags = le16_to_cpu(resp->flags);
4593
4594                 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
4595                              FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED))
4596                         bp->flags |= BNXT_FLAG_FW_LLDP_AGENT;
4597                 if (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)
4598                         bp->flags |= BNXT_FLAG_MULTI_HOST;
4599         }
4600
4601         switch (resp->port_partition_type) {
4602         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
4603         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
4604         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
4605                 bp->port_partition_type = resp->port_partition_type;
4606                 break;
4607         }
4608
4609 func_qcfg_exit:
4610         mutex_unlock(&bp->hwrm_cmd_lock);
4611         return rc;
4612 }
4613
4614 static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
4615 {
4616         int rc = 0;
4617         struct hwrm_func_qcaps_input req = {0};
4618         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4619
4620         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
4621         req.fid = cpu_to_le16(0xffff);
4622
4623         mutex_lock(&bp->hwrm_cmd_lock);
4624         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4625         if (rc)
4626                 goto hwrm_func_qcaps_exit;
4627
4628         if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED))
4629                 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
4630         if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED))
4631                 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
4632
4633         bp->tx_push_thresh = 0;
4634         if (resp->flags &
4635             cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
4636                 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
4637
4638         if (BNXT_PF(bp)) {
4639                 struct bnxt_pf_info *pf = &bp->pf;
4640
4641                 pf->fw_fid = le16_to_cpu(resp->fid);
4642                 pf->port_id = le16_to_cpu(resp->port_id);
4643                 bp->dev->dev_port = pf->port_id;
4644                 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
4645                 memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN);
4646                 pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
4647                 pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
4648                 pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
4649                 pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
4650                 pf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
4651                 if (!pf->max_hw_ring_grps)
4652                         pf->max_hw_ring_grps = pf->max_tx_rings;
4653                 pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
4654                 pf->max_vnics = le16_to_cpu(resp->max_vnics);
4655                 pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
4656                 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
4657                 pf->max_vfs = le16_to_cpu(resp->max_vfs);
4658                 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
4659                 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
4660                 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
4661                 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
4662                 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
4663                 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
4664                 if (resp->flags &
4665                     cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED))
4666                         bp->flags |= BNXT_FLAG_WOL_CAP;
4667         } else {
4668 #ifdef CONFIG_BNXT_SRIOV
4669                 struct bnxt_vf_info *vf = &bp->vf;
4670
4671                 vf->fw_fid = le16_to_cpu(resp->fid);
4672
4673                 vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
4674                 vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
4675                 vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
4676                 vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
4677                 vf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
4678                 if (!vf->max_hw_ring_grps)
4679                         vf->max_hw_ring_grps = vf->max_tx_rings;
4680                 vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
4681                 vf->max_vnics = le16_to_cpu(resp->max_vnics);
4682                 vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
4683
4684                 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
4685                 mutex_unlock(&bp->hwrm_cmd_lock);
4686
4687                 if (is_valid_ether_addr(vf->mac_addr)) {
4688                         /* overwrite netdev dev_addr with admin VF MAC */
4689                         memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
4690                 } else {
4691                         eth_hw_addr_random(bp->dev);
4692                         rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
4693                 }
4694                 return rc;
4695 #endif
4696         }
4697
4698 hwrm_func_qcaps_exit:
4699         mutex_unlock(&bp->hwrm_cmd_lock);
4700         return rc;
4701 }
4702
4703 static int bnxt_hwrm_func_reset(struct bnxt *bp)
4704 {
4705         struct hwrm_func_reset_input req = {0};
4706
4707         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
4708         req.enables = 0;
4709
4710         return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
4711 }
4712
4713 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
4714 {
4715         int rc = 0;
4716         struct hwrm_queue_qportcfg_input req = {0};
4717         struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
4718         u8 i, *qptr;
4719
4720         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
4721
4722         mutex_lock(&bp->hwrm_cmd_lock);
4723         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4724         if (rc)
4725                 goto qportcfg_exit;
4726
4727         if (!resp->max_configurable_queues) {
4728                 rc = -EINVAL;
4729                 goto qportcfg_exit;
4730         }
4731         bp->max_tc = resp->max_configurable_queues;
4732         bp->max_lltc = resp->max_configurable_lossless_queues;
4733         if (bp->max_tc > BNXT_MAX_QUEUE)
4734                 bp->max_tc = BNXT_MAX_QUEUE;
4735
4736         if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
4737                 bp->max_tc = 1;
4738
4739         if (bp->max_lltc > bp->max_tc)
4740                 bp->max_lltc = bp->max_tc;
4741
4742         qptr = &resp->queue_id0;
4743         for (i = 0; i < bp->max_tc; i++) {
4744                 bp->q_info[i].queue_id = *qptr++;
4745                 bp->q_info[i].queue_profile = *qptr++;
4746         }
4747
4748 qportcfg_exit:
4749         mutex_unlock(&bp->hwrm_cmd_lock);
4750         return rc;
4751 }
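
/* A minimal sketch (helper name hypothetical; layout per struct
 * hwrm_queue_qportcfg_output in bnxt_hsi.h) of what the qptr walk above
 * assumes: the response places consecutive (queue id, service profile) byte
 * pairs starting at queue_id0, so pair i can be indexed directly.
 */
static inline u8 bnxt_example_queue_id(struct hwrm_queue_qportcfg_output *resp,
				       int i)
{
	u8 *qptr = &resp->queue_id0;

	return qptr[2 * i];	/* qptr[2 * i + 1] is queue i's profile */
}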
4752
4753 static int bnxt_hwrm_ver_get(struct bnxt *bp)
4754 {
4755         int rc;
4756         struct hwrm_ver_get_input req = {0};
4757         struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
4758         u32 dev_caps_cfg;
4759
4760         bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
4761         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
4762         req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
4763         req.hwrm_intf_min = HWRM_VERSION_MINOR;
4764         req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
4765         mutex_lock(&bp->hwrm_cmd_lock);
4766         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4767         if (rc)
4768                 goto hwrm_ver_get_exit;
4769
4770         memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
4771
4772         bp->hwrm_spec_code = resp->hwrm_intf_maj << 16 |
4773                              resp->hwrm_intf_min << 8 | resp->hwrm_intf_upd;
4774         if (resp->hwrm_intf_maj < 1) {
4775                 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
4776                             resp->hwrm_intf_maj, resp->hwrm_intf_min,
4777                             resp->hwrm_intf_upd);
4778                 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
4779         }
4780         snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d/%d.%d.%d",
4781                  resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
4782                  resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);
4783
4784         bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
4785         if (!bp->hwrm_cmd_timeout)
4786                 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
4787
4788         if (resp->hwrm_intf_maj >= 1)
4789                 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
4790
4791         bp->chip_num = le16_to_cpu(resp->chip_num);
4792         if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
4793             !resp->chip_metal)
4794                 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
4795
4796         dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
4797         if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
4798             (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
4799                 bp->flags |= BNXT_FLAG_SHORT_CMD;
4800
4801 hwrm_ver_get_exit:
4802         mutex_unlock(&bp->hwrm_cmd_lock);
4803         return rc;
4804 }
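
/* A minimal sketch (helper name hypothetical) of how bnxt_hwrm_ver_get()
 * above packs the interface version: maj << 16 | min << 8 | upd, so guards
 * elsewhere in this file such as "bp->hwrm_spec_code < 0x10601" read as
 * "interface older than 1.6.1".
 */
static inline u32 bnxt_example_spec_code(u8 maj, u8 min, u8 upd)
{
	return (u32)maj << 16 | (u32)min << 8 | upd;
}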
4805
4806 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
4807 {
4808 #if IS_ENABLED(CONFIG_RTC_LIB)
4809         struct hwrm_fw_set_time_input req = {0};
4810         struct rtc_time tm;
4811         struct timeval tv;
4812
4813         if (bp->hwrm_spec_code < 0x10400)
4814                 return -EOPNOTSUPP;
4815
4816         do_gettimeofday(&tv);
4817         rtc_time_to_tm(tv.tv_sec, &tm);
4818         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
4819         req.year = cpu_to_le16(1900 + tm.tm_year);
4820         req.month = 1 + tm.tm_mon;
4821         req.day = tm.tm_mday;
4822         req.hour = tm.tm_hour;
4823         req.minute = tm.tm_min;
4824         req.second = tm.tm_sec;
4825         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4826 #else
4827         return -EOPNOTSUPP;
4828 #endif
4829 }
4830
4831 static int bnxt_hwrm_port_qstats(struct bnxt *bp)
4832 {
4833         int rc;
4834         struct bnxt_pf_info *pf = &bp->pf;
4835         struct hwrm_port_qstats_input req = {0};
4836
4837         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
4838                 return 0;
4839
4840         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
4841         req.port_id = cpu_to_le16(pf->port_id);
4842         req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
4843         req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
4844         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4845         return rc;
4846 }
4847
4848 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
4849 {
4850         if (bp->vxlan_port_cnt) {
4851                 bnxt_hwrm_tunnel_dst_port_free(
4852                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
4853         }
4854         bp->vxlan_port_cnt = 0;
4855         if (bp->nge_port_cnt) {
4856                 bnxt_hwrm_tunnel_dst_port_free(
4857                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
4858         }
4859         bp->nge_port_cnt = 0;
4860 }
4861
4862 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
4863 {
4864         int rc, i;
4865         u32 tpa_flags = 0;
4866
4867         if (set_tpa)
4868                 tpa_flags = bp->flags & BNXT_FLAG_TPA;
4869         for (i = 0; i < bp->nr_vnics; i++) {
4870                 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
4871                 if (rc) {
4872                         netdev_err(bp->dev, "hwrm vnic set tpa failure for vnic %d, rc: %x\n",
4873                                    i, rc);
4874                         return rc;
4875                 }
4876         }
4877         return 0;
4878 }
4879
4880 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
4881 {
4882         int i;
4883
4884         for (i = 0; i < bp->nr_vnics; i++)
4885                 bnxt_hwrm_vnic_set_rss(bp, i, false);
4886 }
4887
4888 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
4889                                     bool irq_re_init)
4890 {
4891         if (bp->vnic_info) {
4892                 bnxt_hwrm_clear_vnic_filter(bp);
4893                 /* clear all RSS settings before freeing the vnic ctx */
4894                 bnxt_hwrm_clear_vnic_rss(bp);
4895                 bnxt_hwrm_vnic_ctx_free(bp);
4896                 /* before freeing the vnic, undo its TPA settings */
4897                 if (bp->flags & BNXT_FLAG_TPA)
4898                         bnxt_set_tpa(bp, false);
4899                 bnxt_hwrm_vnic_free(bp);
4900         }
4901         bnxt_hwrm_ring_free(bp, close_path);
4902         bnxt_hwrm_ring_grp_free(bp);
4903         if (irq_re_init) {
4904                 bnxt_hwrm_stat_ctx_free(bp);
4905                 bnxt_hwrm_free_tunnel_ports(bp);
4906         }
4907 }
4908
4909 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
4910 {
4911         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4912         int rc;
4913
4914         if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
4915                 goto skip_rss_ctx;
4916
4917         /* allocate context for vnic */
4918         rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
4919         if (rc) {
4920                 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
4921                            vnic_id, rc);
4922                 goto vnic_setup_err;
4923         }
4924         bp->rsscos_nr_ctxs++;
4925
4926         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
4927                 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
4928                 if (rc) {
4929                         netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
4930                                    vnic_id, rc);
4931                         goto vnic_setup_err;
4932                 }
4933                 bp->rsscos_nr_ctxs++;
4934         }
4935
4936 skip_rss_ctx:
4937         /* configure default vnic, ring grp */
4938         rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
4939         if (rc) {
4940                 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
4941                            vnic_id, rc);
4942                 goto vnic_setup_err;
4943         }
4944
4945         /* Enable RSS hashing on vnic */
4946         rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
4947         if (rc) {
4948                 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
4949                            vnic_id, rc);
4950                 goto vnic_setup_err;
4951         }
4952
4953         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
4954                 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
4955                 if (rc) {
4956                         netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
4957                                    vnic_id, rc);
4958                 }
4959         }
4960
4961 vnic_setup_err:
4962         return rc;
4963 }
4964
4965 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
4966 {
4967 #ifdef CONFIG_RFS_ACCEL
4968         int i, rc = 0;
4969
4970         for (i = 0; i < bp->rx_nr_rings; i++) {
4971                 struct bnxt_vnic_info *vnic;
4972                 u16 vnic_id = i + 1;
4973                 u16 ring_id = i;
4974
4975                 if (vnic_id >= bp->nr_vnics)
4976                         break;
4977
4978                 vnic = &bp->vnic_info[vnic_id];
4979                 vnic->flags |= BNXT_VNIC_RFS_FLAG;
4980                 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
4981                         vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
4982                 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
4983                 if (rc) {
4984                         netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
4985                                    vnic_id, rc);
4986                         break;
4987                 }
4988                 rc = bnxt_setup_vnic(bp, vnic_id);
4989                 if (rc)
4990                         break;
4991         }
4992         return rc;
4993 #else
4994         return 0;
4995 #endif
4996 }
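
/* Note on the mapping above: vnic 0 is always the default vnic, so RFS vnics
 * are offset by one, rx ring i is backed by vnic i + 1, bounded by the
 * bp->nr_vnics the firmware granted.
 */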
4997
4998 /* Allow PF and VF with default VLAN to be in promiscuous mode */
4999 static bool bnxt_promisc_ok(struct bnxt *bp)
5000 {
5001 #ifdef CONFIG_BNXT_SRIOV
5002         if (BNXT_VF(bp) && !bp->vf.vlan)
5003                 return false;
5004 #endif
5005         return true;
5006 }
5007
5008 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
5009 {
5010         int rc;
5011
5012         rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
5013         if (rc) {
5014                 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
5015                            rc);
5016                 return rc;
5017         }
5018
5019         rc = bnxt_hwrm_vnic_cfg(bp, 1);
5020         if (rc) {
5021                 netdev_err(bp->dev, "Cannot configure special vnic for NS2 A0: %x\n",
5022                            rc);
5023                 return rc;
5024         }
5025         return rc;
5026 }
5027
5028 static int bnxt_cfg_rx_mode(struct bnxt *);
5029 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
5030
5031 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
5032 {
5033         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
5034         int rc = 0;
5035         unsigned int rx_nr_rings = bp->rx_nr_rings;
5036
5037         if (irq_re_init) {
5038                 rc = bnxt_hwrm_stat_ctx_alloc(bp);
5039                 if (rc) {
5040                         netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
5041                                    rc);
5042                         goto err_out;
5043                 }
5044         }
5045
5046         rc = bnxt_hwrm_ring_alloc(bp);
5047         if (rc) {
5048                 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
5049                 goto err_out;
5050         }
5051
5052         rc = bnxt_hwrm_ring_grp_alloc(bp);
5053         if (rc) {
5054                 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
5055                 goto err_out;
5056         }
5057
5058         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5059                 rx_nr_rings--;
5060
5061         /* default vnic 0 */
5062         rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
5063         if (rc) {
5064                 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
5065                 goto err_out;
5066         }
5067
5068         rc = bnxt_setup_vnic(bp, 0);
5069         if (rc)
5070                 goto err_out;
5071
5072         if (bp->flags & BNXT_FLAG_RFS) {
5073                 rc = bnxt_alloc_rfs_vnics(bp);
5074                 if (rc)
5075                         goto err_out;
5076         }
5077
5078         if (bp->flags & BNXT_FLAG_TPA) {
5079                 rc = bnxt_set_tpa(bp, true);
5080                 if (rc)
5081                         goto err_out;
5082         }
5083
5084         if (BNXT_VF(bp))
5085                 bnxt_update_vf_mac(bp);
5086
5087         /* Filter for default vnic 0 */
5088         rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
5089         if (rc) {
5090                 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
5091                 goto err_out;
5092         }
5093         vnic->uc_filter_count = 1;
5094
5095         vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
5096
5097         if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
5098                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
5099
5100         if (bp->dev->flags & IFF_ALLMULTI) {
5101                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
5102                 vnic->mc_list_count = 0;
5103         } else {
5104                 u32 mask = 0;
5105
5106                 bnxt_mc_list_updated(bp, &mask);
5107                 vnic->rx_mask |= mask;
5108         }
5109
5110         rc = bnxt_cfg_rx_mode(bp);
5111         if (rc)
5112                 goto err_out;
5113
5114         rc = bnxt_hwrm_set_coal(bp);
5115         if (rc)
5116                 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
5117                                 rc);
5118
5119         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
5120                 rc = bnxt_setup_nitroa0_vnic(bp);
5121                 if (rc)
5122                         netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
5123                                    rc);
5124         }
5125
5126         if (BNXT_VF(bp)) {
5127                 bnxt_hwrm_func_qcfg(bp);
5128                 netdev_update_features(bp->dev);
5129         }
5130
5131         return 0;
5132
5133 err_out:
5134         bnxt_hwrm_resource_free(bp, 0, true);
5135
5136         return rc;
5137 }
5138
5139 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
5140 {
5141         bnxt_hwrm_resource_free(bp, 1, irq_re_init);
5142         return 0;
5143 }
5144
5145 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
5146 {
5147         bnxt_init_cp_rings(bp);
5148         bnxt_init_rx_rings(bp);
5149         bnxt_init_tx_rings(bp);
5150         bnxt_init_ring_grps(bp, irq_re_init);
5151         bnxt_init_vnics(bp);
5152
5153         return bnxt_init_chip(bp, irq_re_init);
5154 }
5155
5156 static int bnxt_set_real_num_queues(struct bnxt *bp)
5157 {
5158         int rc;
5159         struct net_device *dev = bp->dev;
5160
5161         rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
5162                                           bp->tx_nr_rings_xdp);
5163         if (rc)
5164                 return rc;
5165
5166         rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
5167         if (rc)
5168                 return rc;
5169
5170 #ifdef CONFIG_RFS_ACCEL
5171         if (bp->flags & BNXT_FLAG_RFS)
5172                 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
5173 #endif
5174
5175         return rc;
5176 }
5177
5178 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
5179                            bool shared)
5180 {
5181         int _rx = *rx, _tx = *tx;
5182
5183         if (shared) {
5184                 *rx = min_t(int, _rx, max);
5185                 *tx = min_t(int, _tx, max);
5186         } else {
5187                 if (max < 2)
5188                         return -ENOMEM;
5189
5190                 while (_rx + _tx > max) {
5191                         if (_rx > _tx && _rx > 1)
5192                                 _rx--;
5193                         else if (_tx > 1)
5194                                 _tx--;
5195                 }
5196                 *rx = _rx;
5197                 *tx = _tx;
5198         }
5199         return 0;
5200 }
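
/* Usage sketch (helper name and counts hypothetical): in shared mode each
 * side is simply clamped to max; otherwise the larger side is decremented
 * until the pair fits. For example rx = 6, tx = 6, max = 8, shared = false
 * converges to rx = 4, tx = 4.
 */
static inline int bnxt_example_trim(struct bnxt *bp)
{
	int rx = 6, tx = 6;

	return bnxt_trim_rings(bp, &rx, &tx, 8, false);	/* leaves rx == tx == 4 */
}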
5201
5202 static void bnxt_setup_msix(struct bnxt *bp)
5203 {
5204         const int len = sizeof(bp->irq_tbl[0].name);
5205         struct net_device *dev = bp->dev;
5206         int tcs, i;
5207
5208         tcs = netdev_get_num_tc(dev);
5209         if (tcs > 1) {
5210                 int i, off, count;
5211
5212                 for (i = 0; i < tcs; i++) {
5213                         count = bp->tx_nr_rings_per_tc;
5214                         off = i * count;
5215                         netdev_set_tc_queue(dev, i, count, off);
5216                 }
5217         }
5218
5219         for (i = 0; i < bp->cp_nr_rings; i++) {
5220                 char *attr;
5221
5222                 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
5223                         attr = "TxRx";
5224                 else if (i < bp->rx_nr_rings)
5225                         attr = "rx";
5226                 else
5227                         attr = "tx";
5228
5229                 snprintf(bp->irq_tbl[i].name, len, "%s-%s-%d", dev->name, attr,
5230                          i);
5231                 bp->irq_tbl[i].handler = bnxt_msix;
5232         }
5233 }
5234
5235 static void bnxt_setup_inta(struct bnxt *bp)
5236 {
5237         const int len = sizeof(bp->irq_tbl[0].name);
5238
5239         if (netdev_get_num_tc(bp->dev))
5240                 netdev_reset_tc(bp->dev);
5241
5242         snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
5243                  0);
5244         bp->irq_tbl[0].handler = bnxt_inta;
5245 }
5246
5247 static int bnxt_setup_int_mode(struct bnxt *bp)
5248 {
5249         int rc;
5250
5251         if (bp->flags & BNXT_FLAG_USING_MSIX)
5252                 bnxt_setup_msix(bp);
5253         else
5254                 bnxt_setup_inta(bp);
5255
5256         rc = bnxt_set_real_num_queues(bp);
5257         return rc;
5258 }
5259
5260 #ifdef CONFIG_RFS_ACCEL
5261 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
5262 {
5263 #if defined(CONFIG_BNXT_SRIOV)
5264         if (BNXT_VF(bp))
5265                 return bp->vf.max_rsscos_ctxs;
5266 #endif
5267         return bp->pf.max_rsscos_ctxs;
5268 }
5269
5270 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
5271 {
5272 #if defined(CONFIG_BNXT_SRIOV)
5273         if (BNXT_VF(bp))
5274                 return bp->vf.max_vnics;
5275 #endif
5276         return bp->pf.max_vnics;
5277 }
5278 #endif
5279
5280 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
5281 {
5282 #if defined(CONFIG_BNXT_SRIOV)
5283         if (BNXT_VF(bp))
5284                 return bp->vf.max_stat_ctxs;
5285 #endif
5286         return bp->pf.max_stat_ctxs;
5287 }
5288
5289 void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max)
5290 {
5291 #if defined(CONFIG_BNXT_SRIOV)
5292         if (BNXT_VF(bp))
5293                 bp->vf.max_stat_ctxs = max;
5294         else
5295 #endif
5296                 bp->pf.max_stat_ctxs = max;
5297 }
5298
5299 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
5300 {
5301 #if defined(CONFIG_BNXT_SRIOV)
5302         if (BNXT_VF(bp))
5303                 return bp->vf.max_cp_rings;
5304 #endif
5305         return bp->pf.max_cp_rings;
5306 }
5307
5308 void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max)
5309 {
5310 #if defined(CONFIG_BNXT_SRIOV)
5311         if (BNXT_VF(bp))
5312                 bp->vf.max_cp_rings = max;
5313         else
5314 #endif
5315                 bp->pf.max_cp_rings = max;
5316 }
5317
5318 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
5319 {
5320 #if defined(CONFIG_BNXT_SRIOV)
5321         if (BNXT_VF(bp))
5322                 return min_t(unsigned int, bp->vf.max_irqs,
5323                              bp->vf.max_cp_rings);
5324 #endif
5325         return min_t(unsigned int, bp->pf.max_irqs, bp->pf.max_cp_rings);
5326 }
5327
5328 void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
5329 {
5330 #if defined(CONFIG_BNXT_SRIOV)
5331         if (BNXT_VF(bp))
5332                 bp->vf.max_irqs = max_irqs;
5333         else
5334 #endif
5335                 bp->pf.max_irqs = max_irqs;
5336 }
5337
5338 static int bnxt_init_msix(struct bnxt *bp)
5339 {
5340         int i, total_vecs, rc = 0, min = 1;
5341         struct msix_entry *msix_ent;
5342
5343         total_vecs = bnxt_get_max_func_irqs(bp);
5344         msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
5345         if (!msix_ent)
5346                 return -ENOMEM;
5347
5348         for (i = 0; i < total_vecs; i++) {
5349                 msix_ent[i].entry = i;
5350                 msix_ent[i].vector = 0;
5351         }
5352
5353         if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
5354                 min = 2;
5355
5356         total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
5357         if (total_vecs < 0) {
5358                 rc = -ENODEV;
5359                 goto msix_setup_exit;
5360         }
5361
5362         bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
5363         if (bp->irq_tbl) {
5364                 for (i = 0; i < total_vecs; i++)
5365                         bp->irq_tbl[i].vector = msix_ent[i].vector;
5366
5367                 bp->total_irqs = total_vecs;
5368                 /* Trim rings based on the number of vectors allocated */
5369                 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
5370                                      total_vecs, min == 1);
5371                 if (rc)
5372                         goto msix_setup_exit;
5373
5374                 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
5375                 bp->cp_nr_rings = (min == 1) ?
5376                                   max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
5377                                   bp->tx_nr_rings + bp->rx_nr_rings;
5378
5379         } else {
5380                 rc = -ENOMEM;
5381                 goto msix_setup_exit;
5382         }
5383         bp->flags |= BNXT_FLAG_USING_MSIX;
5384         kfree(msix_ent);
5385         return 0;
5386
5387 msix_setup_exit:
5388         netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
5389         kfree(bp->irq_tbl);
5390         bp->irq_tbl = NULL;
5391         pci_disable_msix(bp->pdev);
5392         kfree(msix_ent);
5393         return rc;
5394 }
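
/* A minimal sketch (helper name hypothetical) of the cp_nr_rings computation
 * above: with shared rings (min == 1) one completion ring serves an rx/tx
 * pair, so the count is max(rx, tx); unshared, every rx and tx ring needs its
 * own, giving rx + tx.
 */
static inline int bnxt_example_cp_rings(int rx, int tx, bool shared)
{
	return shared ? max(rx, tx) : rx + tx;
}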
5395
5396 static int bnxt_init_inta(struct bnxt *bp)
5397 {
5398         bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
5399         if (!bp->irq_tbl)
5400                 return -ENOMEM;
5401
5402         bp->total_irqs = 1;
5403         bp->rx_nr_rings = 1;
5404         bp->tx_nr_rings = 1;
5405         bp->cp_nr_rings = 1;
5406         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
5407         bp->flags |= BNXT_FLAG_SHARED_RINGS;
5408         bp->irq_tbl[0].vector = bp->pdev->irq;
5409         return 0;
5410 }
5411
5412 static int bnxt_init_int_mode(struct bnxt *bp)
5413 {
5414         int rc = 0;
5415
5416         if (bp->flags & BNXT_FLAG_MSIX_CAP)
5417                 rc = bnxt_init_msix(bp);
5418
5419         if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
5420                 /* fall back to INTA */
5421                 rc = bnxt_init_inta(bp);
5422         }
5423         return rc;
5424 }
5425
5426 static void bnxt_clear_int_mode(struct bnxt *bp)
5427 {
5428         if (bp->flags & BNXT_FLAG_USING_MSIX)
5429                 pci_disable_msix(bp->pdev);
5430
5431         kfree(bp->irq_tbl);
5432         bp->irq_tbl = NULL;
5433         bp->flags &= ~BNXT_FLAG_USING_MSIX;
5434 }
5435
5436 static void bnxt_free_irq(struct bnxt *bp)
5437 {
5438         struct bnxt_irq *irq;
5439         int i;
5440
5441 #ifdef CONFIG_RFS_ACCEL
5442         free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
5443         bp->dev->rx_cpu_rmap = NULL;
5444 #endif
5445         if (!bp->irq_tbl)
5446                 return;
5447
5448         for (i = 0; i < bp->cp_nr_rings; i++) {
5449                 irq = &bp->irq_tbl[i];
5450                 if (irq->requested)
5451                         free_irq(irq->vector, bp->bnapi[i]);
5452                 irq->requested = 0;
5453         }
5454 }
5455
5456 static int bnxt_request_irq(struct bnxt *bp)
5457 {
5458         int i, j, rc = 0;
5459         unsigned long flags = 0;
5460 #ifdef CONFIG_RFS_ACCEL
5461         struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap;
5462 #endif
5463
5464         if (!(bp->flags & BNXT_FLAG_USING_MSIX))
5465                 flags = IRQF_SHARED;
5466
5467         for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
5468                 struct bnxt_irq *irq = &bp->irq_tbl[i];
5469 #ifdef CONFIG_RFS_ACCEL
5470                 if (rmap && bp->bnapi[i]->rx_ring) {
5471                         rc = irq_cpu_rmap_add(rmap, irq->vector);
5472                         if (rc)
5473                                 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
5474                                             j);
5475                         j++;
5476                 }
5477 #endif
5478                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
5479                                  bp->bnapi[i]);
5480                 if (rc)
5481                         break;
5482
5483                 irq->requested = 1;
5484         }
5485         return rc;
5486 }
5487
5488 static void bnxt_del_napi(struct bnxt *bp)
5489 {
5490         int i;
5491
5492         if (!bp->bnapi)
5493                 return;
5494
5495         for (i = 0; i < bp->cp_nr_rings; i++) {
5496                 struct bnxt_napi *bnapi = bp->bnapi[i];
5497
5498                 napi_hash_del(&bnapi->napi);
5499                 netif_napi_del(&bnapi->napi);
5500         }
5501         /* We called napi_hash_del() before netif_napi_del(), so we must
5502          * respect an RCU grace period before freeing the napi structures.
5503          */
5504         synchronize_net();
5505 }
5506
5507 static void bnxt_init_napi(struct bnxt *bp)
5508 {
5509         int i;
5510         unsigned int cp_nr_rings = bp->cp_nr_rings;
5511         struct bnxt_napi *bnapi;
5512
5513         if (bp->flags & BNXT_FLAG_USING_MSIX) {
5514                 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5515                         cp_nr_rings--;
5516                 for (i = 0; i < cp_nr_rings; i++) {
5517                         bnapi = bp->bnapi[i];
5518                         netif_napi_add(bp->dev, &bnapi->napi,
5519                                        bnxt_poll, 64);
5520                 }
5521                 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
5522                         bnapi = bp->bnapi[cp_nr_rings];
5523                         netif_napi_add(bp->dev, &bnapi->napi,
5524                                        bnxt_poll_nitroa0, 64);
5525                 }
5526         } else {
5527                 bnapi = bp->bnapi[0];
5528                 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
5529         }
5530 }
5531
5532 static void bnxt_disable_napi(struct bnxt *bp)
5533 {
5534         int i;
5535
5536         if (!bp->bnapi)
5537                 return;
5538
5539         for (i = 0; i < bp->cp_nr_rings; i++)
5540                 napi_disable(&bp->bnapi[i]->napi);
5541 }
5542
5543 static void bnxt_enable_napi(struct bnxt *bp)
5544 {
5545         int i;
5546
5547         for (i = 0; i < bp->cp_nr_rings; i++) {
5548                 bp->bnapi[i]->in_reset = false;
5549                 napi_enable(&bp->bnapi[i]->napi);
5550         }
5551 }
5552
5553 void bnxt_tx_disable(struct bnxt *bp)
5554 {
5555         int i;
5556         struct bnxt_tx_ring_info *txr;
5557         struct netdev_queue *txq;
5558
5559         if (bp->tx_ring) {
5560                 for (i = 0; i < bp->tx_nr_rings; i++) {
5561                         txr = &bp->tx_ring[i];
5562                         txq = netdev_get_tx_queue(bp->dev, i);
5563                         txr->dev_state = BNXT_DEV_STATE_CLOSING;
5564                 }
5565         }
5566         /* Stop all TX queues */
5567         netif_tx_disable(bp->dev);
5568         netif_carrier_off(bp->dev);
5569 }
5570
5571 void bnxt_tx_enable(struct bnxt *bp)
5572 {
5573         int i;
5574         struct bnxt_tx_ring_info *txr;
5575         struct netdev_queue *txq;
5576
5577         for (i = 0; i < bp->tx_nr_rings; i++) {
5578                 txr = &bp->tx_ring[i];
5579                 txq = netdev_get_tx_queue(bp->dev, i);
5580                 txr->dev_state = 0;
5581         }
5582         netif_tx_wake_all_queues(bp->dev);
5583         if (bp->link_info.link_up)
5584                 netif_carrier_on(bp->dev);
5585 }
5586
5587 static void bnxt_report_link(struct bnxt *bp)
5588 {
5589         if (bp->link_info.link_up) {
5590                 const char *duplex;
5591                 const char *flow_ctrl;
5592                 u32 speed;
5593                 u16 fec;
5594
5595                 netif_carrier_on(bp->dev);
5596                 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
5597                         duplex = "full";
5598                 else
5599                         duplex = "half";
5600                 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
5601                         flow_ctrl = "ON - receive & transmit";
5602                 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
5603                         flow_ctrl = "ON - transmit";
5604                 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
5605                         flow_ctrl = "ON - receive";
5606                 else
5607                         flow_ctrl = "none";
5608                 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
5609                 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
5610                             speed, duplex, flow_ctrl);
5611                 if (bp->flags & BNXT_FLAG_EEE_CAP)
5612                         netdev_info(bp->dev, "EEE is %s\n",
5613                                     bp->eee.eee_active ? "active" :
5614                                                          "not active");
5615                 fec = bp->link_info.fec_cfg;
5616                 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
5617                         netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n",
5618                                     (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
5619                                     (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" :
5620                                      (fec & BNXT_FEC_ENC_RS) ? "RS" : "None");
5621         } else {
5622                 netif_carrier_off(bp->dev);
5623                 netdev_err(bp->dev, "NIC Link is Down\n");
5624         }
5625 }
5626
5627 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
5628 {
5629         int rc = 0;
5630         struct hwrm_port_phy_qcaps_input req = {0};
5631         struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5632         struct bnxt_link_info *link_info = &bp->link_info;
5633
5634         if (bp->hwrm_spec_code < 0x10201)
5635                 return 0;
5636
5637         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
5638
5639         mutex_lock(&bp->hwrm_cmd_lock);
5640         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5641         if (rc)
5642                 goto hwrm_phy_qcaps_exit;
5643
5644         if (resp->eee_supported & PORT_PHY_QCAPS_RESP_EEE_SUPPORTED) {
5645                 struct ethtool_eee *eee = &bp->eee;
5646                 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
5647
5648                 bp->flags |= BNXT_FLAG_EEE_CAP;
5649                 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
5650                 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
5651                                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
5652                 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
5653                                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
5654         }
5655         if (resp->supported_speeds_auto_mode)
5656                 link_info->support_auto_speeds =
5657                         le16_to_cpu(resp->supported_speeds_auto_mode);
5658
5659 hwrm_phy_qcaps_exit:
5660         mutex_unlock(&bp->hwrm_cmd_lock);
5661         return rc;
5662 }
5663
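/* Refresh the cached link state from firmware.  If @chng_link_state is
 * true, update link_info->link_up and report any carrier change;
 * otherwise the link is left marked down.
 */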
5664 static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
5665 {
5666         int rc = 0;
5667         struct bnxt_link_info *link_info = &bp->link_info;
5668         struct hwrm_port_phy_qcfg_input req = {0};
5669         struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5670         u8 link_up = link_info->link_up;
5671         u16 diff;
5672
5673         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
5674
5675         mutex_lock(&bp->hwrm_cmd_lock);
5676         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5677         if (rc) {
5678                 mutex_unlock(&bp->hwrm_cmd_lock);
5679                 return rc;
5680         }
5681
5682         memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
5683         link_info->phy_link_status = resp->link;
5684         link_info->duplex =  resp->duplex;
5685         link_info->pause = resp->pause;
5686         link_info->auto_mode = resp->auto_mode;
5687         link_info->auto_pause_setting = resp->auto_pause;
5688         link_info->lp_pause = resp->link_partner_adv_pause;
5689         link_info->force_pause_setting = resp->force_pause;
5690         link_info->duplex_setting = resp->duplex;
5691         if (link_info->phy_link_status == BNXT_LINK_LINK)
5692                 link_info->link_speed = le16_to_cpu(resp->link_speed);
5693         else
5694                 link_info->link_speed = 0;
5695         link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
5696         link_info->support_speeds = le16_to_cpu(resp->support_speeds);
5697         link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
5698         link_info->lp_auto_link_speeds =
5699                 le16_to_cpu(resp->link_partner_adv_speeds);
5700         link_info->preemphasis = le32_to_cpu(resp->preemphasis);
5701         link_info->phy_ver[0] = resp->phy_maj;
5702         link_info->phy_ver[1] = resp->phy_min;
5703         link_info->phy_ver[2] = resp->phy_bld;
5704         link_info->media_type = resp->media_type;
5705         link_info->phy_type = resp->phy_type;
5706         link_info->transceiver = resp->xcvr_pkg_type;
5707         link_info->phy_addr = resp->eee_config_phy_addr &
5708                               PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
5709         link_info->module_status = resp->module_status;
5710
5711         if (bp->flags & BNXT_FLAG_EEE_CAP) {
5712                 struct ethtool_eee *eee = &bp->eee;
5713                 u16 fw_speeds;
5714
5715                 eee->eee_active = 0;
5716                 if (resp->eee_config_phy_addr &
5717                     PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
5718                         eee->eee_active = 1;
5719                         fw_speeds = le16_to_cpu(
5720                                 resp->link_partner_adv_eee_link_speed_mask);
5721                         eee->lp_advertised =
5722                                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
5723                 }
5724
5725                 /* Pull initial EEE config */
5726                 if (!chng_link_state) {
5727                         if (resp->eee_config_phy_addr &
5728                             PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
5729                                 eee->eee_enabled = 1;
5730
5731                         fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
5732                         eee->advertised =
5733                                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
5734
5735                         if (resp->eee_config_phy_addr &
5736                             PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
5737                                 __le32 tmr;
5738
5739                                 eee->tx_lpi_enabled = 1;
5740                                 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
5741                                 eee->tx_lpi_timer = le32_to_cpu(tmr) &
5742                                         PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
5743                         }
5744                 }
5745         }
5746
5747         link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
5748         if (bp->hwrm_spec_code >= 0x10504)
5749                 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
5750
5751         /* TODO: need to add more logic to report VF link */
5752         if (chng_link_state) {
5753                 if (link_info->phy_link_status == BNXT_LINK_LINK)
5754                         link_info->link_up = 1;
5755                 else
5756                         link_info->link_up = 0;
5757                 if (link_up != link_info->link_up)
5758                         bnxt_report_link(bp);
5759         } else {
5760                 /* always link down if not required to update link state */
5761                 link_info->link_up = 0;
5762         }
5763         mutex_unlock(&bp->hwrm_cmd_lock);
5764
5765         diff = link_info->support_auto_speeds ^ link_info->advertising;
5766         if ((link_info->support_auto_speeds | diff) !=
5767             link_info->support_auto_speeds) {
5768                 /* An advertised speed is no longer supported, so we need to
5769                  * update the advertisement settings.  Caller holds RTNL
5770                  * so we can modify link settings.
5771                  */
5772                 link_info->advertising = link_info->support_auto_speeds;
5773                 if (link_info->autoneg & BNXT_AUTONEG_SPEED)
5774                         bnxt_hwrm_set_link_setting(bp, true, false);
5775         }
5776         return 0;
5777 }
5778
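/* Check the SFP+ module status reported by firmware and warn if an
 * unqualified module has been detected.
 */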
5779 static void bnxt_get_port_module_status(struct bnxt *bp)
5780 {
5781         struct bnxt_link_info *link_info = &bp->link_info;
5782         struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
5783         u8 module_status;
5784
5785         if (bnxt_update_link(bp, true))
5786                 return;
5787
5788         module_status = link_info->module_status;
5789         switch (module_status) {
5790         case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
5791         case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
5792         case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
5793                 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
5794                             bp->pf.port_id);
5795                 if (bp->hwrm_spec_code >= 0x10201) {
5796                         netdev_warn(bp->dev, "Module part number %s\n",
5797                                     resp->phy_vendor_partnumber);
5798                 }
5799                 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
5800                         netdev_warn(bp->dev, "TX is disabled\n");
5801                 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
5802                         netdev_warn(bp->dev, "SFP+ module is shutdown\n");
5803         }
5804 }
5805
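/* Fill in the pause fields of a PORT_PHY_CFG request, using the autoneg
 * pause fields when flow control autoneg is enabled and the forced pause
 * fields otherwise.
 */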
5806 static void
5807 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
5808 {
5809         if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
5810                 if (bp->hwrm_spec_code >= 0x10201)
5811                         req->auto_pause =
5812                                 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
5813                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
5814                         req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
5815                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
5816                         req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
5817                 req->enables |=
5818                         cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
5819         } else {
5820                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
5821                         req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
5822                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
5823                         req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
5824                 req->enables |=
5825                         cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
5826                 if (bp->hwrm_spec_code >= 0x10201) {
5827                         req->auto_pause = req->force_pause;
5828                         req->enables |= cpu_to_le32(
5829                                 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
5830                 }
5831         }
5832 }
5833
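/* Fill in the speed fields of a PORT_PHY_CFG request: advertise the
 * configured speed mask when speed autoneg is enabled, otherwise force
 * the requested speed.
 */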
5834 static void bnxt_hwrm_set_link_common(struct bnxt *bp,
5835                                       struct hwrm_port_phy_cfg_input *req)
5836 {
5837         u8 autoneg = bp->link_info.autoneg;
5838         u16 fw_link_speed = bp->link_info.req_link_speed;
5839         u16 advertising = bp->link_info.advertising;
5840
5841         if (autoneg & BNXT_AUTONEG_SPEED) {
5842                 req->auto_mode |=
5843                         PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
5844
5845                 req->enables |= cpu_to_le32(
5846                         PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
5847                 req->auto_link_speed_mask = cpu_to_le16(advertising);
5848
5849                 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
5850                 req->flags |=
5851                         cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
5852         } else {
5853                 req->force_link_speed = cpu_to_le16(fw_link_speed);
5854                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
5855         }
5856
5857         /* tell ChiMP that the setting takes effect immediately */
5858         req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
5859 }
5860
5861 int bnxt_hwrm_set_pause(struct bnxt *bp)
5862 {
5863         struct hwrm_port_phy_cfg_input req = {0};
5864         int rc;
5865
5866         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
5867         bnxt_hwrm_set_pause_common(bp, &req);
5868
5869         if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
5870             bp->link_info.force_link_chng)
5871                 bnxt_hwrm_set_link_common(bp, &req);
5872
5873         mutex_lock(&bp->hwrm_cmd_lock);
5874         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5875         if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
5876                 /* Since changing the pause setting doesn't trigger any link
5877                  * change event, the driver needs to update the current pause
5878                  * result upon successful return of the phy_cfg command
5879                  */
5880                 bp->link_info.pause =
5881                 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
5882                 bp->link_info.auto_pause_setting = 0;
5883                 if (!bp->link_info.force_link_chng)
5884                         bnxt_report_link(bp);
5885         }
5886         bp->link_info.force_link_chng = false;
5887         mutex_unlock(&bp->hwrm_cmd_lock);
5888         return rc;
5889 }
5890
5891 static void bnxt_hwrm_set_eee(struct bnxt *bp,
5892                               struct hwrm_port_phy_cfg_input *req)
5893 {
5894         struct ethtool_eee *eee = &bp->eee;
5895
5896         if (eee->eee_enabled) {
5897                 u16 eee_speeds;
5898                 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
5899
5900                 if (eee->tx_lpi_enabled)
5901                         flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
5902                 else
5903                         flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
5904
5905                 req->flags |= cpu_to_le32(flags);
5906                 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
5907                 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
5908                 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
5909         } else {
5910                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
5911         }
5912 }
5913
5914 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
5915 {
5916         struct hwrm_port_phy_cfg_input req = {0};
5917
5918         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
5919         if (set_pause)
5920                 bnxt_hwrm_set_pause_common(bp, &req);
5921
5922         bnxt_hwrm_set_link_common(bp, &req);
5923
5924         if (set_eee)
5925                 bnxt_hwrm_set_eee(bp, &req);
5926         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5927 }
5928
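/* Force the link down on close.  Skipped on multi-function devices and
 * when VFs are active, since other functions may still be using the port.
 */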
5929 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
5930 {
5931         struct hwrm_port_phy_cfg_input req = {0};
5932
5933         if (!BNXT_SINGLE_PF(bp))
5934                 return 0;
5935
5936         if (pci_num_vf(bp->pdev))
5937                 return 0;
5938
5939         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
5940         req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
5941         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5942 }
5943
5944 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
5945 {
5946         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5947         struct hwrm_port_led_qcaps_input req = {0};
5948         struct bnxt_pf_info *pf = &bp->pf;
5949         int rc;
5950
5951         if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
5952                 return 0;
5953
5954         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
5955         req.port_id = cpu_to_le16(pf->port_id);
5956         mutex_lock(&bp->hwrm_cmd_lock);
5957         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5958         if (rc) {
5959                 mutex_unlock(&bp->hwrm_cmd_lock);
5960                 return rc;
5961         }
5962         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
5963                 int i;
5964
5965                 bp->num_leds = resp->num_leds;
5966                 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
5967                                                  bp->num_leds);
5968                 for (i = 0; i < bp->num_leds; i++) {
5969                         struct bnxt_led_info *led = &bp->leds[i];
5970                         __le16 caps = led->led_state_caps;
5971
5972                         if (!led->led_group_id ||
5973                             !BNXT_LED_ALT_BLINK_CAP(caps)) {
5974                                 bp->num_leds = 0;
5975                                 break;
5976                         }
5977                 }
5978         }
5979         mutex_unlock(&bp->hwrm_cmd_lock);
5980         return 0;
5981 }
5982
5983 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
5984 {
5985         struct hwrm_wol_filter_alloc_input req = {0};
5986         struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5987         int rc;
5988
5989         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
5990         req.port_id = cpu_to_le16(bp->pf.port_id);
5991         req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
5992         req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
5993         memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
5994         mutex_lock(&bp->hwrm_cmd_lock);
5995         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5996         if (!rc)
5997                 bp->wol_filter_id = resp->wol_filter_id;
5998         mutex_unlock(&bp->hwrm_cmd_lock);
5999         return rc;
6000 }
6001
6002 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
6003 {
6004         struct hwrm_wol_filter_free_input req = {0};
6005         int rc;
6006
6007         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
6008         req.port_id = cpu_to_le16(bp->pf.port_id);
6009         req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
6010         req.wol_filter_id = bp->wol_filter_id;
6011         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6012         return rc;
6013 }
6014
6015 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
6016 {
6017         struct hwrm_wol_filter_qcfg_input req = {0};
6018         struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
6019         u16 next_handle = 0;
6020         int rc;
6021
6022         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
6023         req.port_id = cpu_to_le16(bp->pf.port_id);
6024         req.handle = cpu_to_le16(handle);
6025         mutex_lock(&bp->hwrm_cmd_lock);
6026         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6027         if (!rc) {
6028                 next_handle = le16_to_cpu(resp->next_handle);
6029                 if (next_handle != 0) {
6030                         if (resp->wol_type ==
6031                             WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
6032                                 bp->wol = 1;
6033                                 bp->wol_filter_id = resp->wol_filter_id;
6034                         }
6035                 }
6036         }
6037         mutex_unlock(&bp->hwrm_cmd_lock);
6038         return next_handle;
6039 }
6040
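/* Walk the firmware's WOL filter list and record any magic packet filter
 * configured for this port.
 */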
6041 static void bnxt_get_wol_settings(struct bnxt *bp)
6042 {
6043         u16 handle = 0;
6044
6045         if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
6046                 return;
6047
6048         do {
6049                 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
6050         } while (handle && handle != 0xffff);
6051 }
6052
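/* Validate the EEE configuration against the current link settings,
 * correcting it if necessary.  Returns false if a correction was made.
 */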
6053 static bool bnxt_eee_config_ok(struct bnxt *bp)
6054 {
6055         struct ethtool_eee *eee = &bp->eee;
6056         struct bnxt_link_info *link_info = &bp->link_info;
6057
6058         if (!(bp->flags & BNXT_FLAG_EEE_CAP))
6059                 return true;
6060
6061         if (eee->eee_enabled) {
6062                 u32 advertising =
6063                         _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
6064
6065                 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
6066                         eee->eee_enabled = 0;
6067                         return false;
6068                 }
6069                 if (eee->advertised & ~advertising) {
6070                         eee->advertised = advertising & eee->supported;
6071                         return false;
6072                 }
6073         }
6074         return true;
6075 }
6076
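/* Compare the requested link, pause, and EEE settings with the current
 * firmware state and reconfigure the PHY if anything differs.
 */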
6077 static int bnxt_update_phy_setting(struct bnxt *bp)
6078 {
6079         int rc;
6080         bool update_link = false;
6081         bool update_pause = false;
6082         bool update_eee = false;
6083         struct bnxt_link_info *link_info = &bp->link_info;
6084
6085         rc = bnxt_update_link(bp, true);
6086         if (rc) {
6087                 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
6088                            rc);
6089                 return rc;
6090         }
6091         if (!BNXT_SINGLE_PF(bp))
6092                 return 0;
6093
6094         if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
6095             (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
6096             link_info->req_flow_ctrl)
6097                 update_pause = true;
6098         if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
6099             link_info->force_pause_setting != link_info->req_flow_ctrl)
6100                 update_pause = true;
6101         if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
6102                 if (BNXT_AUTO_MODE(link_info->auto_mode))
6103                         update_link = true;
6104                 if (link_info->req_link_speed != link_info->force_link_speed)
6105                         update_link = true;
6106                 if (link_info->req_duplex != link_info->duplex_setting)
6107                         update_link = true;
6108         } else {
6109                 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
6110                         update_link = true;
6111                 if (link_info->advertising != link_info->auto_link_speeds)
6112                         update_link = true;
6113         }
6114
6115         /* The last close may have shut down the link, so we need to call
6116          * PHY_CFG to bring it back up.
6117          */
6118         if (!netif_carrier_ok(bp->dev))
6119                 update_link = true;
6120
6121         if (!bnxt_eee_config_ok(bp))
6122                 update_eee = true;
6123
6124         if (update_link)
6125                 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
6126         else if (update_pause)
6127                 rc = bnxt_hwrm_set_pause(bp);
6128         if (rc) {
6129                 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
6130                            rc);
6131                 return rc;
6132         }
6133
6134         return rc;
6135 }
6136
6137 /* Common routine to pre-map certain register blocks to different GRC windows.
6138  * A PF has 16 4K windows and a VF has 4 4K windows.  However, only 15 windows
6139  * in the PF and 3 windows in the VF can be customized to map in different
6140  * register blocks.
6141  */
6142 static void bnxt_preset_reg_win(struct bnxt *bp)
6143 {
6144         if (BNXT_PF(bp)) {
6145                 /* CAG registers map to GRC window #4 */
6146                 writel(BNXT_CAG_REG_BASE,
6147                        bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
6148         }
6149 }
6150
6151 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
6152 {
6153         int rc = 0;
6154
6155         bnxt_preset_reg_win(bp);
6156         netif_carrier_off(bp->dev);
6157         if (irq_re_init) {
6158                 rc = bnxt_setup_int_mode(bp);
6159                 if (rc) {
6160                         netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
6161                                    rc);
6162                         return rc;
6163                 }
6164         }
6165         if ((bp->flags & BNXT_FLAG_RFS) &&
6166             !(bp->flags & BNXT_FLAG_USING_MSIX)) {
6167                 /* disable RFS if falling back to INTA */
6168                 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
6169                 bp->flags &= ~BNXT_FLAG_RFS;
6170         }
6171
6172         rc = bnxt_alloc_mem(bp, irq_re_init);
6173         if (rc) {
6174                 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
6175                 goto open_err_free_mem;
6176         }
6177
6178         if (irq_re_init) {
6179                 bnxt_init_napi(bp);
6180                 rc = bnxt_request_irq(bp);
6181                 if (rc) {
6182                         netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
6183                         goto open_err;
6184                 }
6185         }
6186
6187         bnxt_enable_napi(bp);
6188
6189         rc = bnxt_init_nic(bp, irq_re_init);
6190         if (rc) {
6191                 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
6192                 goto open_err;
6193         }
6194
6195         if (link_re_init) {
6196                 rc = bnxt_update_phy_setting(bp);
6197                 if (rc)
6198                         netdev_warn(bp->dev, "failed to update phy settings\n");
6199         }
6200
6201         if (irq_re_init)
6202                 udp_tunnel_get_rx_info(bp->dev);
6203
6204         set_bit(BNXT_STATE_OPEN, &bp->state);
6205         bnxt_enable_int(bp);
6206         /* Enable TX queues */
6207         bnxt_tx_enable(bp);
6208         mod_timer(&bp->timer, jiffies + bp->current_interval);
6209         /* Poll link status and check for SFP+ module status */
6210         bnxt_get_port_module_status(bp);
6211
6212         return 0;
6213
6214 open_err:
6215         bnxt_disable_napi(bp);
6216         bnxt_del_napi(bp);
6217
6218 open_err_free_mem:
6219         bnxt_free_skbs(bp);
6220         bnxt_free_irq(bp);
6221         bnxt_free_mem(bp, true);
6222         return rc;
6223 }
6224
6225 /* rtnl_lock held */
6226 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
6227 {
6228         int rc = 0;
6229
6230         rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
6231         if (rc) {
6232                 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
6233                 dev_close(bp->dev);
6234         }
6235         return rc;
6236 }
6237
6238 /* rtnl_lock held, open the NIC halfway by allocating all resources, but
6239  * NAPI, IRQ, and TX are not enabled.  This is mainly used for offline
6240  * self tests.
6241  */
6242 int bnxt_half_open_nic(struct bnxt *bp)
6243 {
6244         int rc = 0;
6245
6246         rc = bnxt_alloc_mem(bp, false);
6247         if (rc) {
6248                 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
6249                 goto half_open_err;
6250         }
6251         rc = bnxt_init_nic(bp, false);
6252         if (rc) {
6253                 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
6254                 goto half_open_err;
6255         }
6256         return 0;
6257
6258 half_open_err:
6259         bnxt_free_skbs(bp);
6260         bnxt_free_mem(bp, false);
6261         dev_close(bp->dev);
6262         return rc;
6263 }
6264
6265 /* rtnl_lock held, this call can only be made after a previous successful
6266  * call to bnxt_half_open_nic().
6267  */
6268 void bnxt_half_close_nic(struct bnxt *bp)
6269 {
6270         bnxt_hwrm_resource_free(bp, false, false);
6271         bnxt_free_skbs(bp);
6272         bnxt_free_mem(bp, false);
6273 }
6274
6275 static int bnxt_open(struct net_device *dev)
6276 {
6277         struct bnxt *bp = netdev_priv(dev);
6278
6279         return __bnxt_open_nic(bp, true, true);
6280 }
6281
6282 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
6283 {
6284         int rc = 0;
6285
6286 #ifdef CONFIG_BNXT_SRIOV
6287         if (bp->sriov_cfg) {
6288                 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
6289                                                       !bp->sriov_cfg,
6290                                                       BNXT_SRIOV_CFG_WAIT_TMO);
6291                 if (!rc)
6292                         netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
6293         }
6294 #endif
6295         /* Change device state to avoid TX queue wake-ups */
6296         bnxt_tx_disable(bp);
6297
6298         clear_bit(BNXT_STATE_OPEN, &bp->state);
6299         smp_mb__after_atomic();
6300         while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state))
6301                 msleep(20);
6302
6303         /* Flush rings and disable interrupts */
6304         bnxt_shutdown_nic(bp, irq_re_init);
6305
6306         /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
6307
6308         bnxt_disable_napi(bp);
6309         del_timer_sync(&bp->timer);
6310         bnxt_free_skbs(bp);
6311
6312         if (irq_re_init) {
6313                 bnxt_free_irq(bp);
6314                 bnxt_del_napi(bp);
6315         }
6316         bnxt_free_mem(bp, irq_re_init);
6317         return rc;
6318 }
6319
6320 static int bnxt_close(struct net_device *dev)
6321 {
6322         struct bnxt *bp = netdev_priv(dev);
6323
6324         bnxt_close_nic(bp, true, true);
6325         bnxt_hwrm_shutdown_link(bp);
6326         return 0;
6327 }
6328
6329 /* rtnl_lock held */
6330 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6331 {
6332         switch (cmd) {
6333         case SIOCGMIIPHY:
6334                 /* fallthru */
6335         case SIOCGMIIREG: {
6336                 if (!netif_running(dev))
6337                         return -EAGAIN;
6338
6339                 return 0;
6340         }
6341
6342         case SIOCSMIIREG:
6343                 if (!netif_running(dev))
6344                         return -EAGAIN;
6345
6346                 return 0;
6347
6348         default:
6349                 /* do nothing */
6350                 break;
6351         }
6352         return -EOPNOTSUPP;
6353 }
6354
6355 static void
6356 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6357 {
6358         u32 i;
6359         struct bnxt *bp = netdev_priv(dev);
6360
6361         if (!bp->bnapi)
6362                 return;
6363
6364         /* TODO check if we need to synchronize with bnxt_close path */
6365         for (i = 0; i < bp->cp_nr_rings; i++) {
6366                 struct bnxt_napi *bnapi = bp->bnapi[i];
6367                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6368                 struct ctx_hw_stats *hw_stats = cpr->hw_stats;
6369
6370                 stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
6371                 stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
6372                 stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
6373
6374                 stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
6375                 stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
6376                 stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
6377
6378                 stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
6379                 stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
6380                 stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
6381
6382                 stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
6383                 stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
6384                 stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
6385
6386                 stats->rx_missed_errors +=
6387                         le64_to_cpu(hw_stats->rx_discard_pkts);
6388
6389                 stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
6390
6391                 stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
6392         }
6393
6394         if (bp->flags & BNXT_FLAG_PORT_STATS) {
6395                 struct rx_port_stats *rx = bp->hw_rx_port_stats;
6396                 struct tx_port_stats *tx = bp->hw_tx_port_stats;
6397
6398                 stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames);
6399                 stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames);
6400                 stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) +
6401                                           le64_to_cpu(rx->rx_ovrsz_frames) +
6402                                           le64_to_cpu(rx->rx_runt_frames);
6403                 stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) +
6404                                    le64_to_cpu(rx->rx_jbr_frames);
6405                 stats->collisions = le64_to_cpu(tx->tx_total_collisions);
6406                 stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
6407                 stats->tx_errors = le64_to_cpu(tx->tx_err);
6408         }
6409 }
6410
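/* Copy the device's multicast list into the default VNIC and return true
 * if it has changed.  Falls back to all-multicast mode if the list
 * exceeds BNXT_MAX_MC_ADDRS.
 */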
6411 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
6412 {
6413         struct net_device *dev = bp->dev;
6414         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
6415         struct netdev_hw_addr *ha;
6416         u8 *haddr;
6417         int mc_count = 0;
6418         bool update = false;
6419         int off = 0;
6420
6421         netdev_for_each_mc_addr(ha, dev) {
6422                 if (mc_count >= BNXT_MAX_MC_ADDRS) {
6423                         *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
6424                         vnic->mc_list_count = 0;
6425                         return false;
6426                 }
6427                 haddr = ha->addr;
6428                 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
6429                         memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
6430                         update = true;
6431                 }
6432                 off += ETH_ALEN;
6433                 mc_count++;
6434         }
6435         if (mc_count)
6436                 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
6437
6438         if (mc_count != vnic->mc_list_count) {
6439                 vnic->mc_list_count = mc_count;
6440                 update = true;
6441         }
6442         return update;
6443 }
6444
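/* Return true if the device's unicast address list differs from the copy
 * cached in the default VNIC.
 */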
6445 static bool bnxt_uc_list_updated(struct bnxt *bp)
6446 {
6447         struct net_device *dev = bp->dev;
6448         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
6449         struct netdev_hw_addr *ha;
6450         int off = 0;
6451
6452         if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
6453                 return true;
6454
6455         netdev_for_each_uc_addr(ha, dev) {
6456                 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
6457                         return true;
6458
6459                 off += ETH_ALEN;
6460         }
6461         return false;
6462 }
6463
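/* ndo_set_rx_mode handler.  Computes the new rx mask and defers the
 * actual firmware update to the slow path task.
 */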
6464 static void bnxt_set_rx_mode(struct net_device *dev)
6465 {
6466         struct bnxt *bp = netdev_priv(dev);
6467         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
6468         u32 mask = vnic->rx_mask;
6469         bool mc_update = false;
6470         bool uc_update;
6471
6472         if (!netif_running(dev))
6473                 return;
6474
6475         mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
6476                   CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
6477                   CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST);
6478
6479         if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
6480                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
6481
6482         uc_update = bnxt_uc_list_updated(bp);
6483
6484         if (dev->flags & IFF_ALLMULTI) {
6485                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
6486                 vnic->mc_list_count = 0;
6487         } else {
6488                 mc_update = bnxt_mc_list_updated(bp, &mask);
6489         }
6490
6491         if (mask != vnic->rx_mask || uc_update || mc_update) {
6492                 vnic->rx_mask = mask;
6493
6494                 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
6495                 schedule_work(&bp->sp_task);
6496         }
6497 }
6498
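/* Sync the unicast filter list and the rx mask to firmware.  Runs from
 * the slow path task.
 */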
6499 static int bnxt_cfg_rx_mode(struct bnxt *bp)
6500 {
6501         struct net_device *dev = bp->dev;
6502         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
6503         struct netdev_hw_addr *ha;
6504         int i, off = 0, rc;
6505         bool uc_update;
6506
6507         netif_addr_lock_bh(dev);
6508         uc_update = bnxt_uc_list_updated(bp);
6509         netif_addr_unlock_bh(dev);
6510
6511         if (!uc_update)
6512                 goto skip_uc;
6513
6514         mutex_lock(&bp->hwrm_cmd_lock);
6515         for (i = 1; i < vnic->uc_filter_count; i++) {
6516                 struct hwrm_cfa_l2_filter_free_input req = {0};
6517
6518                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
6519                                        -1);
6520
6521                 req.l2_filter_id = vnic->fw_l2_filter_id[i];
6522
6523                 rc = _hwrm_send_message(bp, &req, sizeof(req),
6524                                         HWRM_CMD_TIMEOUT);
6525         }
6526         mutex_unlock(&bp->hwrm_cmd_lock);
6527
6528         vnic->uc_filter_count = 1;
6529
6530         netif_addr_lock_bh(dev);
6531         if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
6532                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
6533         } else {
6534                 netdev_for_each_uc_addr(ha, dev) {
6535                         memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
6536                         off += ETH_ALEN;
6537                         vnic->uc_filter_count++;
6538                 }
6539         }
6540         netif_addr_unlock_bh(dev);
6541
6542         for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
6543                 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
6544                 if (rc) {
6545                         netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
6546                                    rc);
6547                         vnic->uc_filter_count = i;
6548                         return rc;
6549                 }
6550         }
6551
6552 skip_uc:
6553         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
6554         if (rc)
6555                 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
6556                            rc);
6557
6558         return rc;
6559 }
6560
6561 /* If the chip and firmware support RFS */
6562 static bool bnxt_rfs_supported(struct bnxt *bp)
6563 {
6564         if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
6565                 return true;
6566         if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
6567                 return true;
6568         return false;
6569 }
6570
6571 /* If runtime conditions support RFS */
6572 static bool bnxt_rfs_capable(struct bnxt *bp)
6573 {
6574 #ifdef CONFIG_RFS_ACCEL
6575         int vnics, max_vnics, max_rss_ctxs;
6576
6577         if (!(bp->flags & BNXT_FLAG_MSIX_CAP))
6578                 return false;
6579
6580         vnics = 1 + bp->rx_nr_rings;
6581         max_vnics = bnxt_get_max_func_vnics(bp);
6582         max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
6583
6584         /* RSS contexts not a limiting factor */
6585         if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
6586                 max_rss_ctxs = max_vnics;
6587         if (vnics > max_vnics || vnics > max_rss_ctxs) {
6588                 netdev_warn(bp->dev,
6589                             "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
6590                             min(max_rss_ctxs - 1, max_vnics - 1));
6591                 return false;
6592         }
6593
6594         return true;
6595 #else
6596         return false;
6597 #endif
6598 }
6599
6600 static netdev_features_t bnxt_fix_features(struct net_device *dev,
6601                                            netdev_features_t features)
6602 {
6603         struct bnxt *bp = netdev_priv(dev);
6604
6605         if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
6606                 features &= ~NETIF_F_NTUPLE;
6607
6608         /* Both CTAG and STAG VLAN acceleration on the RX side have to be
6609          * turned on or off together.
6610          */
6611         if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
6612             (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
6613                 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
6614                         features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
6615                                       NETIF_F_HW_VLAN_STAG_RX);
6616                 else
6617                         features |= NETIF_F_HW_VLAN_CTAG_RX |
6618                                     NETIF_F_HW_VLAN_STAG_RX;
6619         }
6620 #ifdef CONFIG_BNXT_SRIOV
6621         if (BNXT_VF(bp)) {
6622                 if (bp->vf.vlan) {
6623                         features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
6624                                       NETIF_F_HW_VLAN_STAG_RX);
6625                 }
6626         }
6627 #endif
6628         return features;
6629 }
6630
6631 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
6632 {
6633         struct bnxt *bp = netdev_priv(dev);
6634         u32 flags = bp->flags;
6635         u32 changes;
6636         int rc = 0;
6637         bool re_init = false;
6638         bool update_tpa = false;
6639
6640         flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
6641         if ((features & NETIF_F_GRO) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
6642                 flags |= BNXT_FLAG_GRO;
6643         if (features & NETIF_F_LRO)
6644                 flags |= BNXT_FLAG_LRO;
6645
6646         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
6647                 flags &= ~BNXT_FLAG_TPA;
6648
6649         if (features & NETIF_F_HW_VLAN_CTAG_RX)
6650                 flags |= BNXT_FLAG_STRIP_VLAN;
6651
6652         if (features & NETIF_F_NTUPLE)
6653                 flags |= BNXT_FLAG_RFS;
6654
6655         changes = flags ^ bp->flags;
6656         if (changes & BNXT_FLAG_TPA) {
6657                 update_tpa = true;
6658                 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
6659                     (flags & BNXT_FLAG_TPA) == 0)
6660                         re_init = true;
6661         }
6662
6663         if (changes & ~BNXT_FLAG_TPA)
6664                 re_init = true;
6665
6666         if (flags != bp->flags) {
6667                 u32 old_flags = bp->flags;
6668
6669                 bp->flags = flags;
6670
6671                 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
6672                         if (update_tpa)
6673                                 bnxt_set_ring_params(bp);
6674                         return rc;
6675                 }
6676
6677                 if (re_init) {
6678                         bnxt_close_nic(bp, false, false);
6679                         if (update_tpa)
6680                                 bnxt_set_ring_params(bp);
6681
6682                         return bnxt_open_nic(bp, false, false);
6683                 }
6684                 if (update_tpa) {
6685                         rc = bnxt_set_tpa(bp,
6686                                           (flags & BNXT_FLAG_TPA) ?
6687                                           true : false);
6688                         if (rc)
6689                                 bp->flags = old_flags;
6690                 }
6691         }
6692         return rc;
6693 }
6694
6695 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
6696 {
6697         struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
6698         int i = bnapi->index;
6699
6700         if (!txr)
6701                 return;
6702
6703         netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
6704                     i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
6705                     txr->tx_cons);
6706 }
6707
6708 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
6709 {
6710         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
6711         int i = bnapi->index;
6712
6713         if (!rxr)
6714                 return;
6715
6716         netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
6717                     i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
6718                     rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
6719                     rxr->rx_sw_agg_prod);
6720 }
6721
6722 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
6723 {
6724         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6725         int i = bnapi->index;
6726
6727         netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
6728                     i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
6729 }
6730
6731 static void bnxt_dbg_dump_states(struct bnxt *bp)
6732 {
6733         int i;
6734         struct bnxt_napi *bnapi;
6735
6736         for (i = 0; i < bp->cp_nr_rings; i++) {
6737                 bnapi = bp->bnapi[i];
6738                 if (netif_msg_drv(bp)) {
6739                         bnxt_dump_tx_sw_state(bnapi);
6740                         bnxt_dump_rx_sw_state(bnapi);
6741                         bnxt_dump_cp_sw_state(bnapi);
6742                 }
6743         }
6744 }
6745
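/* Reset the device by closing and reopening the NIC.  Caller must hold
 * rtnl_lock.
 */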
6746 static void bnxt_reset_task(struct bnxt *bp, bool silent)
6747 {
6748         if (!silent)
6749                 bnxt_dbg_dump_states(bp);
6750         if (netif_running(bp->dev)) {
6751                 int rc;
6752
6753                 if (!silent)
6754                         bnxt_ulp_stop(bp);
6755                 bnxt_close_nic(bp, false, false);
6756                 rc = bnxt_open_nic(bp, false, false);
6757                 if (!silent && !rc)
6758                         bnxt_ulp_start(bp);
6759         }
6760 }
6761
6762 static void bnxt_tx_timeout(struct net_device *dev)
6763 {
6764         struct bnxt *bp = netdev_priv(dev);
6765
6766         netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
6767         set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
6768         schedule_work(&bp->sp_task);
6769 }
6770
6771 #ifdef CONFIG_NET_POLL_CONTROLLER
6772 static void bnxt_poll_controller(struct net_device *dev)
6773 {
6774         struct bnxt *bp = netdev_priv(dev);
6775         int i;
6776
6777         /* Only process tx rings/combined rings in netpoll mode. */
6778         for (i = 0; i < bp->tx_nr_rings; i++) {
6779                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
6780
6781                 napi_schedule(&txr->bnapi->napi);
6782         }
6783 }
6784 #endif
6785
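/* Periodic timer.  Schedules a port statistics update while the link is
 * up and then re-arms itself.
 */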
6786 static void bnxt_timer(unsigned long data)
6787 {
6788         struct bnxt *bp = (struct bnxt *)data;
6789         struct net_device *dev = bp->dev;
6790
6791         if (!netif_running(dev))
6792                 return;
6793
6794         if (atomic_read(&bp->intr_sem) != 0)
6795                 goto bnxt_restart_timer;
6796
6797         if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS)) {
6798                 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
6799                 schedule_work(&bp->sp_task);
6800         }
6801 bnxt_restart_timer:
6802         mod_timer(&bp->timer, jiffies + bp->current_interval);
6803 }
6804
6805 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
6806 {
6807         /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
6808          * set.  If the device is being closed, bnxt_close() may be holding
6809          * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear.  So we
6810          * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
6811          */
6812         clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
6813         rtnl_lock();
6814 }
6815
6816 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
6817 {
6818         set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
6819         rtnl_unlock();
6820 }
6821
6822 /* Only called from bnxt_sp_task() */
6823 static void bnxt_reset(struct bnxt *bp, bool silent)
6824 {
6825         bnxt_rtnl_lock_sp(bp);
6826         if (test_bit(BNXT_STATE_OPEN, &bp->state))
6827                 bnxt_reset_task(bp, silent);
6828         bnxt_rtnl_unlock_sp(bp);
6829 }
6830
6831 static void bnxt_cfg_ntp_filters(struct bnxt *);
6832
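/* Slow path task.  Handles deferred work items (rx mode updates, tunnel
 * port changes, link changes, resets) scheduled from contexts that cannot
 * perform the work directly.
 */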
6833 static void bnxt_sp_task(struct work_struct *work)
6834 {
6835         struct bnxt *bp = container_of(work, struct bnxt, sp_task);
6836
6837         set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
6838         smp_mb__after_atomic();
6839         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
6840                 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
6841                 return;
6842         }
6843
6844         if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
6845                 bnxt_cfg_rx_mode(bp);
6846
6847         if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
6848                 bnxt_cfg_ntp_filters(bp);
6849         if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
6850                 bnxt_hwrm_exec_fwd_req(bp);
6851         if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
6852                 bnxt_hwrm_tunnel_dst_port_alloc(
6853                         bp, bp->vxlan_port,
6854                         TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
6855         }
6856         if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
6857                 bnxt_hwrm_tunnel_dst_port_free(
6858                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
6859         }
6860         if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) {
6861                 bnxt_hwrm_tunnel_dst_port_alloc(
6862                         bp, bp->nge_port,
6863                         TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
6864         }
6865         if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) {
6866                 bnxt_hwrm_tunnel_dst_port_free(
6867                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
6868         }
6869         if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
6870                 bnxt_hwrm_port_qstats(bp);
6871
6872         /* These functions below will clear BNXT_STATE_IN_SP_TASK.  They
6873          * must be the last functions to be called before exiting.
6874          */
6875         if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
6876                 int rc = 0;
6877
6878                 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
6879                                        &bp->sp_event))
6880                         bnxt_hwrm_phy_qcaps(bp);
6881
6882                 bnxt_rtnl_lock_sp(bp);
6883                 if (test_bit(BNXT_STATE_OPEN, &bp->state))
6884                         rc = bnxt_update_link(bp, true);
6885                 bnxt_rtnl_unlock_sp(bp);
6886                 if (rc)
6887                         netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
6888                                    rc);
6889         }
6890         if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
6891                 bnxt_rtnl_lock_sp(bp);
6892                 if (test_bit(BNXT_STATE_OPEN, &bp->state))
6893                         bnxt_get_port_module_status(bp);
6894                 bnxt_rtnl_unlock_sp(bp);
6895         }
6896         if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
6897                 bnxt_reset(bp, false);
6898
6899         if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
6900                 bnxt_reset(bp, true);
6901
6902         smp_mb__before_atomic();
6903         clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
6904 }
6905
6906 /* Under rtnl_lock */
6907 int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, int tcs, int tx_xdp)
6908 {
6909         int max_rx, max_tx, tx_sets = 1;
6910         int tx_rings_needed;
6911         bool sh = true;
6912         int rc;
6913
6914         if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
6915                 sh = false;
6916
6917         if (tcs)
6918                 tx_sets = tcs;
6919
6920         rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
6921         if (rc)
6922                 return rc;
6923
6924         if (max_rx < rx)
6925                 return -ENOMEM;
6926
6927         tx_rings_needed = tx * tx_sets + tx_xdp;
6928         if (max_tx < tx_rings_needed)
6929                 return -ENOMEM;
6930
6931         if (bnxt_hwrm_reserve_tx_rings(bp, &tx_rings_needed) ||
6932             tx_rings_needed < (tx * tx_sets + tx_xdp))
6933                 return -ENOMEM;
6934         return 0;
6935 }
6936
6937 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
6938 {
6939         if (bp->bar2) {
6940                 pci_iounmap(pdev, bp->bar2);
6941                 bp->bar2 = NULL;
6942         }
6943
6944         if (bp->bar1) {
6945                 pci_iounmap(pdev, bp->bar1);
6946                 bp->bar1 = NULL;
6947         }
6948
6949         if (bp->bar0) {
6950                 pci_iounmap(pdev, bp->bar0);
6951                 bp->bar0 = NULL;
6952         }
6953 }
6954
6955 static void bnxt_cleanup_pci(struct bnxt *bp)
6956 {
6957         bnxt_unmap_bars(bp, bp->pdev);
6958         pci_release_regions(bp->pdev);
6959         pci_disable_device(bp->pdev);
6960 }
6961
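/* Enable the PCI device, map the BARs, and initialize the default ring
 * sizes, coalescing parameters, and periodic timer.
 */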
6962 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
6963 {
6964         int rc;
6965         struct bnxt *bp = netdev_priv(dev);
6966
6967         SET_NETDEV_DEV(dev, &pdev->dev);
6968
6969         /* enable device (incl. PCI PM wakeup), and bus-mastering */
6970         rc = pci_enable_device(pdev);
6971         if (rc) {
6972                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
6973                 goto init_err;
6974         }
6975
6976         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
6977                 dev_err(&pdev->dev,
6978                         "Cannot find PCI device base address, aborting\n");
6979                 rc = -ENODEV;
6980                 goto init_err_disable;
6981         }
6982
6983         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6984         if (rc) {
6985                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
6986                 goto init_err_disable;
6987         }
6988
6989         if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
6990             dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
6991                 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
                rc = -EIO;
6992                 goto init_err_disable;
6993         }
6994
6995         pci_set_master(pdev);
6996
6997         bp->dev = dev;
6998         bp->pdev = pdev;
6999
7000         bp->bar0 = pci_ioremap_bar(pdev, 0);
7001         if (!bp->bar0) {
7002                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
7003                 rc = -ENOMEM;
7004                 goto init_err_release;
7005         }
7006
7007         bp->bar1 = pci_ioremap_bar(pdev, 2);
7008         if (!bp->bar1) {
7009                 dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
7010                 rc = -ENOMEM;
7011                 goto init_err_release;
7012         }
7013
7014         bp->bar2 = pci_ioremap_bar(pdev, 4);
7015         if (!bp->bar2) {
7016                 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
7017                 rc = -ENOMEM;
7018                 goto init_err_release;
7019         }
7020
7021         pci_enable_pcie_error_reporting(pdev);
7022
7023         INIT_WORK(&bp->sp_task, bnxt_sp_task);
7024
7025         spin_lock_init(&bp->ntp_fltr_lock);
7026
7027         bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
7028         bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
7029
7030         /* tick values in micro seconds */
7031         bp->rx_coal_ticks = 12;
7032         bp->rx_coal_bufs = 30;
7033         bp->rx_coal_ticks_irq = 1;
7034         bp->rx_coal_bufs_irq = 2;
7035
7036         bp->tx_coal_ticks = 25;
7037         bp->tx_coal_bufs = 30;
7038         bp->tx_coal_ticks_irq = 2;
7039         bp->tx_coal_bufs_irq = 2;
7040
7041         bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
7042
7043         init_timer(&bp->timer);
7044         bp->timer.data = (unsigned long)bp;
7045         bp->timer.function = bnxt_timer;
7046         bp->current_interval = BNXT_TIMER_INTERVAL;
7047
7048         clear_bit(BNXT_STATE_OPEN, &bp->state);
7049         return 0;
7050
7051 init_err_release:
7052         bnxt_unmap_bars(bp, pdev);
7053         pci_release_regions(pdev);
7054
7055 init_err_disable:
7056         pci_disable_device(pdev);
7057
7058 init_err:
7059         return rc;
7060 }
7061
7062 /* rtnl_lock held */
7063 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
7064 {
7065         struct sockaddr *addr = p;
7066         struct bnxt *bp = netdev_priv(dev);
7067         int rc = 0;
7068
7069         if (!is_valid_ether_addr(addr->sa_data))
7070                 return -EADDRNOTAVAIL;
7071
7072         rc = bnxt_approve_mac(bp, addr->sa_data);
7073         if (rc)
7074                 return rc;
7075
7076         if (ether_addr_equal(addr->sa_data, dev->dev_addr))
7077                 return 0;
7078
7079         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7080         if (netif_running(dev)) {
7081                 bnxt_close_nic(bp, false, false);
7082                 rc = bnxt_open_nic(bp, false, false);
7083         }
7084
7085         return rc;
7086 }
7087
7088 /* rtnl_lock held */
7089 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
7090 {
7091         struct bnxt *bp = netdev_priv(dev);
7092
7093         if (netif_running(dev))
7094                 bnxt_close_nic(bp, false, false);
7095
7096         dev->mtu = new_mtu;
7097         bnxt_set_ring_params(bp);
7098
7099         if (netif_running(dev))
7100                 return bnxt_open_nic(bp, false, false);
7101
7102         return 0;
7103 }
7104
7105 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
7106 {
7107         struct bnxt *bp = netdev_priv(dev);
7108         bool sh = false;
7109         int rc;
7110
7111         if (tc > bp->max_tc) {
7112                 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
7113                            tc, bp->max_tc);
7114                 return -EINVAL;
7115         }
7116
7117         if (netdev_get_num_tc(dev) == tc)
7118                 return 0;
7119
7120         if (bp->flags & BNXT_FLAG_SHARED_RINGS)
7121                 sh = true;
7122
7123         rc = bnxt_reserve_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
7124                                 tc, bp->tx_nr_rings_xdp);
7125         if (rc)
7126                 return rc;
7127
7128         /* Need to close the device and redo hardware resource allocations */
7129         if (netif_running(bp->dev))
7130                 bnxt_close_nic(bp, true, false);
7131
7132         if (tc) {
7133                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
7134                 netdev_set_num_tc(dev, tc);
7135         } else {
7136                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
7137                 netdev_reset_tc(dev);
7138         }
7139         bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
7140                                bp->tx_nr_rings + bp->rx_nr_rings;
7141         bp->num_stat_ctxs = bp->cp_nr_rings;
7142
7143         if (netif_running(bp->dev))
7144                 return bnxt_open_nic(bp, true, false);
7145
7146         return 0;
7147 }
7148
7149 static int bnxt_setup_tc(struct net_device *dev, u32 handle, u32 chain_index,
7150                          __be16 proto, struct tc_to_netdev *ntc)
7151 {
7152         if (ntc->type != TC_SETUP_MQPRIO)
7153                 return -EINVAL;
7154
7155         ntc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
7156
7157         return bnxt_setup_mq_tc(dev, ntc->mqprio->num_tc);
7158 }
7159
7160 #ifdef CONFIG_RFS_ACCEL
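/* Return true if two ntuple filters match on all dissected flow fields
 * (full IPv4 or IPv6 addresses, ports, IP protocol, ethertype, control
 * flags) and on both source and destination MAC addresses.
 */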
7161 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
7162                             struct bnxt_ntuple_filter *f2)
7163 {
7164         struct flow_keys *keys1 = &f1->fkeys;
7165         struct flow_keys *keys2 = &f2->fkeys;
7166
        if (keys1->basic.n_proto != keys2->basic.n_proto ||
            keys1->basic.ip_proto != keys2->basic.ip_proto)
                return false;

        if (keys1->basic.n_proto == htons(ETH_P_IP)) {
                if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
                    keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
                        return false;
        } else {
                /* For IPv6 flows, the v4addrs union members only alias the
                 * first bytes of the address, so compare the full 128-bit
                 * source and destination addresses instead.
                 */
                if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
                           sizeof(keys1->addrs.v6addrs.src)) ||
                    memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
                           sizeof(keys1->addrs.v6addrs.dst)))
                        return false;
        }

        if (keys1->ports.ports == keys2->ports.ports &&
            keys1->control.flags == keys2->control.flags &&
            ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
            ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
                return true;

        return false;
7178 }
7179
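/* aRFS callback (.ndo_rx_flow_steer).  Dissects the flow keys of @skb,
 * bails out if an equivalent filter is already queued, otherwise allocates
 * a filter entry plus an sw_id from the filter bitmap, links the entry into
 * the hash table and schedules sp_task to program the ntuple filter into
 * the hardware.  Returns the new filter's sw_id, 0 if an equivalent filter
 * already exists, or a negative errno.
 */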
7180 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
7181                               u16 rxq_index, u32 flow_id)
7182 {
7183         struct bnxt *bp = netdev_priv(dev);
7184         struct bnxt_ntuple_filter *fltr, *new_fltr;
7185         struct flow_keys *fkeys;
7186         struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
7187         int rc = 0, idx, bit_id, l2_idx = 0;
7188         struct hlist_head *head;
7189
7190         if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
7191                 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
7192                 int off = 0, j;
7193
7194                 netif_addr_lock_bh(dev);
7195                 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
7196                         if (ether_addr_equal(eth->h_dest,
7197                                              vnic->uc_list + off)) {
7198                                 l2_idx = j + 1;
7199                                 break;
7200                         }
7201                 }
7202                 netif_addr_unlock_bh(dev);
7203                 if (!l2_idx)
7204                         return -EINVAL;
7205         }
7206         new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
7207         if (!new_fltr)
7208                 return -ENOMEM;
7209
7210         fkeys = &new_fltr->fkeys;
7211         if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
7212                 rc = -EPROTONOSUPPORT;
7213                 goto err_free;
7214         }
7215
7216         if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
7217              fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
7218             ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
7219              (fkeys->basic.ip_proto != IPPROTO_UDP))) {
7220                 rc = -EPROTONOSUPPORT;
7221                 goto err_free;
7222         }
7223         if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
7224             bp->hwrm_spec_code < 0x10601) {
7225                 rc = -EPROTONOSUPPORT;
7226                 goto err_free;
7227         }
7228         if ((fkeys->control.flags & FLOW_DIS_ENCAPSULATION) &&
7229             bp->hwrm_spec_code < 0x10601) {
7230                 rc = -EPROTONOSUPPORT;
7231                 goto err_free;
7232         }
7233
7234         memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
7235         memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
7236
7237         idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
7238         head = &bp->ntp_fltr_hash_tbl[idx];
7239         rcu_read_lock();
7240         hlist_for_each_entry_rcu(fltr, head, hash) {
7241                 if (bnxt_fltr_match(fltr, new_fltr)) {
7242                         rcu_read_unlock();
7243                         rc = 0;
7244                         goto err_free;
7245                 }
7246         }
7247         rcu_read_unlock();
7248
7249         spin_lock_bh(&bp->ntp_fltr_lock);
7250         bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
7251                                          BNXT_NTP_FLTR_MAX_FLTR, 0);
7252         if (bit_id < 0) {
7253                 spin_unlock_bh(&bp->ntp_fltr_lock);
7254                 rc = -ENOMEM;
7255                 goto err_free;
7256         }
7257
7258         new_fltr->sw_id = (u16)bit_id;
7259         new_fltr->flow_id = flow_id;
7260         new_fltr->l2_fltr_idx = l2_idx;
7261         new_fltr->rxq = rxq_index;
7262         hlist_add_head_rcu(&new_fltr->hash, head);
7263         bp->ntp_fltr_count++;
7264         spin_unlock_bh(&bp->ntp_fltr_lock);
7265
7266         set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
7267         schedule_work(&bp->sp_task);
7268
7269         return new_fltr->sw_id;
7270
7271 err_free:
7272         kfree(new_fltr);
7273         return rc;
7274 }
7275
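/* Slow-path worker for ntuple filters: programs filters that are not yet
 * valid into the hardware and frees filters that RPS has expired or that
 * failed to be programmed.
 */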
7276 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
7277 {
7278         int i;
7279
7280         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
7281                 struct hlist_head *head;
7282                 struct hlist_node *tmp;
7283                 struct bnxt_ntuple_filter *fltr;
7284                 int rc;
7285
7286                 head = &bp->ntp_fltr_hash_tbl[i];
7287                 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
7288                         bool del = false;
7289
7290                         if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
7291                                 if (rps_may_expire_flow(bp->dev, fltr->rxq,
7292                                                         fltr->flow_id,
7293                                                         fltr->sw_id)) {
7294                                         bnxt_hwrm_cfa_ntuple_filter_free(bp,
7295                                                                          fltr);
7296                                         del = true;
7297                                 }
7298                         } else {
7299                                 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
7300                                                                        fltr);
7301                                 if (rc)
7302                                         del = true;
7303                                 else
7304                                         set_bit(BNXT_FLTR_VALID, &fltr->state);
7305                         }
7306
7307                         if (del) {
7308                                 spin_lock_bh(&bp->ntp_fltr_lock);
7309                                 hlist_del_rcu(&fltr->hash);
7310                                 bp->ntp_fltr_count--;
7311                                 spin_unlock_bh(&bp->ntp_fltr_lock);
7312                                 synchronize_rcu();
7313                                 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
7314                                 kfree(fltr);
7315                         }
7316                 }
7317         }
7318         if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
7319                 netdev_info(bp->dev, "Received PF driver unload event!\n");
7320 }
7321
7322 #else
7323
7324 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
7325 {
7326 }
7327
7328 #endif /* CONFIG_RFS_ACCEL */
7329
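/* bnxt_udp_tunnel_add/del: the hardware supports a single VXLAN and a
 * single GENEVE destination UDP port at a time, so each port is reference
 * counted and only the first add (or last delete) of a given port is
 * pushed to the firmware from sp_task.
 */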
7330 static void bnxt_udp_tunnel_add(struct net_device *dev,
7331                                 struct udp_tunnel_info *ti)
7332 {
7333         struct bnxt *bp = netdev_priv(dev);
7334
7335         if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
7336                 return;
7337
7338         if (!netif_running(dev))
7339                 return;
7340
7341         switch (ti->type) {
7342         case UDP_TUNNEL_TYPE_VXLAN:
7343                 if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port)
7344                         return;
7345
7346                 bp->vxlan_port_cnt++;
7347                 if (bp->vxlan_port_cnt == 1) {
7348                         bp->vxlan_port = ti->port;
7349                         set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
7351                 }
7352                 break;
7353         case UDP_TUNNEL_TYPE_GENEVE:
7354                 if (bp->nge_port_cnt && bp->nge_port != ti->port)
7355                         return;
7356
7357                 bp->nge_port_cnt++;
7358                 if (bp->nge_port_cnt == 1) {
7359                         bp->nge_port = ti->port;
7360                         set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event);
7361                 }
7362                 break;
7363         default:
7364                 return;
7365         }
7366
7367         schedule_work(&bp->sp_task);
7368 }
7369
7370 static void bnxt_udp_tunnel_del(struct net_device *dev,
7371                                 struct udp_tunnel_info *ti)
7372 {
7373         struct bnxt *bp = netdev_priv(dev);
7374
7375         if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
7376                 return;
7377
7378         if (!netif_running(dev))
7379                 return;
7380
7381         switch (ti->type) {
7382         case UDP_TUNNEL_TYPE_VXLAN:
7383                 if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port)
7384                         return;
7385                 bp->vxlan_port_cnt--;
7386
7387                 if (bp->vxlan_port_cnt != 0)
7388                         return;
7389
7390                 set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
7391                 break;
7392         case UDP_TUNNEL_TYPE_GENEVE:
7393                 if (!bp->nge_port_cnt || bp->nge_port != ti->port)
7394                         return;
7395                 bp->nge_port_cnt--;
7396
7397                 if (bp->nge_port_cnt != 0)
7398                         return;
7399
7400                 set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event);
7401                 break;
7402         default:
7403                 return;
7404         }
7405
7406         schedule_work(&bp->sp_task);
7407 }
7408
7409 static const struct net_device_ops bnxt_netdev_ops = {
7410         .ndo_open               = bnxt_open,
7411         .ndo_start_xmit         = bnxt_start_xmit,
7412         .ndo_stop               = bnxt_close,
7413         .ndo_get_stats64        = bnxt_get_stats64,
7414         .ndo_set_rx_mode        = bnxt_set_rx_mode,
7415         .ndo_do_ioctl           = bnxt_ioctl,
7416         .ndo_validate_addr      = eth_validate_addr,
7417         .ndo_set_mac_address    = bnxt_change_mac_addr,
7418         .ndo_change_mtu         = bnxt_change_mtu,
7419         .ndo_fix_features       = bnxt_fix_features,
7420         .ndo_set_features       = bnxt_set_features,
7421         .ndo_tx_timeout         = bnxt_tx_timeout,
7422 #ifdef CONFIG_BNXT_SRIOV
7423         .ndo_get_vf_config      = bnxt_get_vf_config,
7424         .ndo_set_vf_mac         = bnxt_set_vf_mac,
7425         .ndo_set_vf_vlan        = bnxt_set_vf_vlan,
7426         .ndo_set_vf_rate        = bnxt_set_vf_bw,
7427         .ndo_set_vf_link_state  = bnxt_set_vf_link_state,
7428         .ndo_set_vf_spoofchk    = bnxt_set_vf_spoofchk,
7429 #endif
7430 #ifdef CONFIG_NET_POLL_CONTROLLER
7431         .ndo_poll_controller    = bnxt_poll_controller,
7432 #endif
7433         .ndo_setup_tc           = bnxt_setup_tc,
7434 #ifdef CONFIG_RFS_ACCEL
7435         .ndo_rx_flow_steer      = bnxt_rx_flow_steer,
7436 #endif
7437         .ndo_udp_tunnel_add     = bnxt_udp_tunnel_add,
7438         .ndo_udp_tunnel_del     = bnxt_udp_tunnel_del,
7439         .ndo_xdp                = bnxt_xdp,
7440 };
7441
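/* PCI .remove callback: disables SR-IOV, unregisters the netdev, cancels
 * the slow-path worker, unregisters from the firmware and frees HWRM, DCB,
 * XDP and PCI resources.
 */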
7442 static void bnxt_remove_one(struct pci_dev *pdev)
7443 {
7444         struct net_device *dev = pci_get_drvdata(pdev);
7445         struct bnxt *bp = netdev_priv(dev);
7446
7447         if (BNXT_PF(bp))
7448                 bnxt_sriov_disable(bp);
7449
7450         pci_disable_pcie_error_reporting(pdev);
7451         unregister_netdev(dev);
7452         cancel_work_sync(&bp->sp_task);
7453         bp->sp_event = 0;
7454
7455         bnxt_clear_int_mode(bp);
7456         bnxt_hwrm_func_drv_unrgtr(bp);
7457         bnxt_free_hwrm_resources(bp);
7458         bnxt_free_hwrm_short_cmd_req(bp);
7459         bnxt_ethtool_free(bp);
7460         bnxt_dcb_free(bp);
7461         kfree(bp->edev);
7462         bp->edev = NULL;
7463         if (bp->xdp_prog)
7464                 bpf_prog_put(bp->xdp_prog);
7465         bnxt_cleanup_pci(bp);
7466         free_netdev(dev);
7467 }
7468
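/* Query PHY capabilities and the current link state from the firmware and
 * seed the ethtool link settings copy (autoneg, advertised speeds, flow
 * control) from the NVM configuration.
 */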
7469 static int bnxt_probe_phy(struct bnxt *bp)
7470 {
7471         int rc = 0;
7472         struct bnxt_link_info *link_info = &bp->link_info;
7473
7474         rc = bnxt_hwrm_phy_qcaps(bp);
7475         if (rc) {
7476                 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
7477                            rc);
7478                 return rc;
7479         }
7480
7481         rc = bnxt_update_link(bp, false);
7482         if (rc) {
7483                 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
7484                            rc);
7485                 return rc;
7486         }
7487
7488         /* Older firmware does not have supported_auto_speeds, so assume
7489          * that all supported speeds can be autonegotiated.
7490          */
7491         if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
7492                 link_info->support_auto_speeds = link_info->support_speeds;
7493
7494         /* Initialize the ethtool settings copy with NVM settings */
7495         if (BNXT_AUTO_MODE(link_info->auto_mode)) {
7496                 link_info->autoneg = BNXT_AUTONEG_SPEED;
7497                 if (bp->hwrm_spec_code >= 0x10201) {
7498                         if (link_info->auto_pause_setting &
7499                             PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
7500                                 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
7501                 } else {
7502                         link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
7503                 }
7504                 link_info->advertising = link_info->auto_link_speeds;
7505         } else {
7506                 link_info->req_link_speed = link_info->force_link_speed;
7507                 link_info->req_duplex = link_info->duplex_setting;
7508         }
7509         if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
7510                 link_info->req_flow_ctrl =
7511                         link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
7512         else
7513                 link_info->req_flow_ctrl = link_info->force_pause_setting;
7514         return rc;
7515 }
7516
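/* Derive the maximum number of interrupt vectors from the MSI-X table size
 * in PCI config space; a function without MSI-X gets a single vector.
 */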
7517 static int bnxt_get_max_irq(struct pci_dev *pdev)
7518 {
7519         u16 ctrl;
7520
7521         if (!pdev->msix_cap)
7522                 return 1;
7523
7524         pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
7525         return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
7526 }
7527
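/* Compute the maximum usable RX/TX/completion rings from the PF or VF
 * resource limits, capping completion rings by IRQs and stat contexts and
 * RX rings by hardware ring groups; aggregation rings halve the RX count.
 */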
7528 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
7529                                 int *max_cp)
7530 {
7531         int max_ring_grps = 0;
7532
7533 #ifdef CONFIG_BNXT_SRIOV
7534         if (!BNXT_PF(bp)) {
7535                 *max_tx = bp->vf.max_tx_rings;
7536                 *max_rx = bp->vf.max_rx_rings;
7537                 *max_cp = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings);
7538                 *max_cp = min_t(int, *max_cp, bp->vf.max_stat_ctxs);
7539                 max_ring_grps = bp->vf.max_hw_ring_grps;
7540         } else
7541 #endif
7542         {
7543                 *max_tx = bp->pf.max_tx_rings;
7544                 *max_rx = bp->pf.max_rx_rings;
7545                 *max_cp = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings);
7546                 *max_cp = min_t(int, *max_cp, bp->pf.max_stat_ctxs);
7547                 max_ring_grps = bp->pf.max_hw_ring_grps;
7548         }
7549         if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
7550                 *max_cp -= 1;
7551                 *max_rx -= 2;
7552         }
7553         if (bp->flags & BNXT_FLAG_AGG_RINGS)
7554                 *max_rx >>= 1;
7555         *max_rx = min_t(int, *max_rx, max_ring_grps);
7556 }
7557
7558 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
7559 {
7560         int rx, tx, cp;
7561
7562         _bnxt_get_max_rings(bp, &rx, &tx, &cp);
7563         if (!rx || !tx || !cp)
7564                 return -ENOMEM;
7565
7566         *max_rx = rx;
7567         *max_tx = tx;
7568         return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
7569 }
7570
7571 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
7572                                bool shared)
7573 {
7574         int rc;
7575
7576         rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
7577         if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
7578                 /* Not enough rings, try disabling agg rings. */
7579                 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
7580                 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
7581                 if (rc)
7582                         return rc;
7583                 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
7584                 bp->dev->hw_features &= ~NETIF_F_LRO;
7585                 bp->dev->features &= ~NETIF_F_LRO;
7586                 bnxt_set_ring_params(bp);
7587         }
7588
7589         if (bp->flags & BNXT_FLAG_ROCE_CAP) {
7590                 int max_cp, max_stat, max_irq;
7591
7592                 /* Reserve minimum resources for RoCE */
7593                 max_cp = bnxt_get_max_func_cp_rings(bp);
7594                 max_stat = bnxt_get_max_func_stat_ctxs(bp);
7595                 max_irq = bnxt_get_max_func_irqs(bp);
7596                 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
7597                     max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
7598                     max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
7599                         return 0;
7600
7601                 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
7602                 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
7603                 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
7604                 max_cp = min_t(int, max_cp, max_irq);
7605                 max_cp = min_t(int, max_cp, max_stat);
7606                 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
7607                 if (rc)
7608                         rc = 0;
7609         }
7610         return rc;
7611 }
7612
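/* Pick default ring counts as min(default RSS queue count, hardware
 * maximums) and reserve the TX rings with the firmware.  In shared mode,
 * RX and TX rings share completion rings.
 */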
7613 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
7614 {
7615         int dflt_rings, max_rx_rings, max_tx_rings, rc;
7616
7617         if (sh)
7618                 bp->flags |= BNXT_FLAG_SHARED_RINGS;
7619         dflt_rings = netif_get_num_default_rss_queues();
7620         rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
7621         if (rc)
7622                 return rc;
7623         bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
7624         bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
7625
7626         rc = bnxt_hwrm_reserve_tx_rings(bp, &bp->tx_nr_rings_per_tc);
7627         if (rc)
7628                 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
7629
7630         bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
7631         bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
7632                                bp->tx_nr_rings + bp->rx_nr_rings;
7633         bp->num_stat_ctxs = bp->cp_nr_rings;
7634         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
7635                 bp->rx_nr_rings++;
7636                 bp->cp_nr_rings++;
7637         }
7638         return rc;
7639 }
7640
7641 void bnxt_restore_pf_fw_resources(struct bnxt *bp)
7642 {
7643         ASSERT_RTNL();
7644         bnxt_hwrm_func_qcaps(bp);
7645         bnxt_subtract_ulp_resources(bp, BNXT_ROCE_ULP);
7646 }
7647
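/* Log the PCIe link speed and width seen by the device (the minimum along
 * the path to the root), or a notice if they cannot be determined.
 */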
7648 static void bnxt_parse_log_pcie_link(struct bnxt *bp)
7649 {
7650         enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
7651         enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
7652
7653         if (pcie_get_minimum_link(bp->pdev, &speed, &width) ||
7654             speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
7655                 netdev_info(bp->dev, "Failed to determine PCIe Link Info\n");
7656         else
7657                 netdev_info(bp->dev, "PCIe: Speed %s Width x%d\n",
7658                             speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
7659                             speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
7660                             speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
7661                             "Unknown", width);
7662 }
7663
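/* PCI .probe callback: allocates the netdev, maps the BARs, brings up the
 * HWRM command channel, queries firmware and function capabilities, sets
 * up default rings, RSS and offload features, probes the PHY and registers
 * the net device.
 */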
7664 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7665 {
7666         static int version_printed;
7667         struct net_device *dev;
7668         struct bnxt *bp;
7669         int rc, max_irqs;
7670
7671         if (pci_is_bridge(pdev))
7672                 return -ENODEV;
7673
7674         if (version_printed++ == 0)
7675                 pr_info("%s", version);
7676
7677         max_irqs = bnxt_get_max_irq(pdev);
7678         dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
7679         if (!dev)
7680                 return -ENOMEM;
7681
7682         bp = netdev_priv(dev);
7683
7684         if (bnxt_vf_pciid(ent->driver_data))
7685                 bp->flags |= BNXT_FLAG_VF;
7686
7687         if (pdev->msix_cap)
7688                 bp->flags |= BNXT_FLAG_MSIX_CAP;
7689
7690         rc = bnxt_init_board(pdev, dev);
7691         if (rc < 0)
7692                 goto init_err_free;
7693
7694         dev->netdev_ops = &bnxt_netdev_ops;
7695         dev->watchdog_timeo = BNXT_TX_TIMEOUT;
7696         dev->ethtool_ops = &bnxt_ethtool_ops;
7697         pci_set_drvdata(pdev, dev);
7698
7699         rc = bnxt_alloc_hwrm_resources(bp);
7700         if (rc)
7701                 goto init_err_pci_clean;
7702
7703         mutex_init(&bp->hwrm_cmd_lock);
7704         rc = bnxt_hwrm_ver_get(bp);
7705         if (rc)
7706                 goto init_err_pci_clean;
7707
7708         if (bp->flags & BNXT_FLAG_SHORT_CMD) {
7709                 rc = bnxt_alloc_hwrm_short_cmd_req(bp);
7710                 if (rc)
7711                         goto init_err_pci_clean;
7712         }
7713
7714         rc = bnxt_hwrm_func_reset(bp);
7715         if (rc)
7716                 goto init_err_pci_clean;
7717
7718         bnxt_hwrm_fw_set_time(bp);
7719
7720         dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
7721                            NETIF_F_TSO | NETIF_F_TSO6 |
7722                            NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
7723                            NETIF_F_GSO_IPXIP4 |
7724                            NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
7725                            NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
7726                            NETIF_F_RXCSUM | NETIF_F_GRO;
7727
7728         if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
7729                 dev->hw_features |= NETIF_F_LRO;
7730
7731         dev->hw_enc_features =
7732                         NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
7733                         NETIF_F_TSO | NETIF_F_TSO6 |
7734                         NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
7735                         NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
7736                         NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
7737         dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
7738                                     NETIF_F_GSO_GRE_CSUM;
7739         dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
7740         dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
7741                             NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
7742         dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
7743         dev->priv_flags |= IFF_UNICAST_FLT;
7744
7745         /* MTU range: 60 - 9500 */
7746         dev->min_mtu = ETH_ZLEN;
7747         dev->max_mtu = BNXT_MAX_MTU;
7748
7749 #ifdef CONFIG_BNXT_SRIOV
7750         init_waitqueue_head(&bp->sriov_cfg_wait);
7751 #endif
7752         bp->gro_func = bnxt_gro_func_5730x;
7753         if (BNXT_CHIP_P4_PLUS(bp))
7754                 bp->gro_func = bnxt_gro_func_5731x;
7755         else
7756                 bp->flags |= BNXT_FLAG_DOUBLE_DB;
7757
7758         rc = bnxt_hwrm_func_drv_rgtr(bp);
7759         if (rc)
7760                 goto init_err_pci_clean;
7761
7762         rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
7763         if (rc)
7764                 goto init_err_pci_clean;
7765
7766         bp->ulp_probe = bnxt_ulp_probe;
7767
7768         /* Get the MAX capabilities for this function */
7769         rc = bnxt_hwrm_func_qcaps(bp);
7770         if (rc) {
7771                 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
7772                            rc);
7773                 rc = -1;
7774                 goto init_err_pci_clean;
7775         }
7776
7777         rc = bnxt_hwrm_queue_qportcfg(bp);
7778         if (rc) {
7779                 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
7780                            rc);
7781                 rc = -1;
7782                 goto init_err_pci_clean;
7783         }
7784
7785         bnxt_hwrm_func_qcfg(bp);
7786         bnxt_hwrm_port_led_qcaps(bp);
7787         bnxt_ethtool_init(bp);
7788         bnxt_dcb_init(bp);
7789
7790         bnxt_set_rx_skb_mode(bp, false);
7791         bnxt_set_tpa_flags(bp);
7792         bnxt_set_ring_params(bp);
7793         bnxt_set_max_func_irqs(bp, max_irqs);
7794         rc = bnxt_set_dflt_rings(bp, true);
7795         if (rc) {
7796                 netdev_err(bp->dev, "Not enough rings available.\n");
7797                 rc = -ENOMEM;
7798                 goto init_err_pci_clean;
7799         }
7800
7801         /* Default RSS hash cfg. */
7802         bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
7803                            VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
7804                            VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
7805                            VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
7806         if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
7807                 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
7808                 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
7809                                     VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
7810         }
7811
7812         bnxt_hwrm_vnic_qcaps(bp);
7813         if (bnxt_rfs_supported(bp)) {
7814                 dev->hw_features |= NETIF_F_NTUPLE;
7815                 if (bnxt_rfs_capable(bp)) {
7816                         bp->flags |= BNXT_FLAG_RFS;
7817                         dev->features |= NETIF_F_NTUPLE;
7818                 }
7819         }
7820
7821         if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
7822                 bp->flags |= BNXT_FLAG_STRIP_VLAN;
7823
7824         rc = bnxt_probe_phy(bp);
7825         if (rc)
7826                 goto init_err_pci_clean;
7827
7828         rc = bnxt_init_int_mode(bp);
7829         if (rc)
7830                 goto init_err_pci_clean;
7831
7832         bnxt_get_wol_settings(bp);
7833         if (bp->flags & BNXT_FLAG_WOL_CAP)
7834                 device_set_wakeup_enable(&pdev->dev, bp->wol);
7835         else
7836                 device_set_wakeup_capable(&pdev->dev, false);
7837
7838         rc = register_netdev(dev);
7839         if (rc)
7840                 goto init_err_clr_int;
7841
7842         netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
7843                     board_info[ent->driver_data].name,
7844                     (long)pci_resource_start(pdev, 0), dev->dev_addr);
7845
7846         bnxt_parse_log_pcie_link(bp);
7847
7848         return 0;
7849
7850 init_err_clr_int:
7851         bnxt_clear_int_mode(bp);
7852
7853 init_err_pci_clean:
7854         bnxt_cleanup_pci(bp);
7855
7856 init_err_free:
7857         free_netdev(dev);
7858         return rc;
7859 }
7860
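/* PCI .shutdown callback: closes the device and, when the system is
 * powering off, shuts down the RDMA ULP, arms wake-on-LAN and puts the
 * function into D3hot.
 */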
7861 static void bnxt_shutdown(struct pci_dev *pdev)
7862 {
7863         struct net_device *dev = pci_get_drvdata(pdev);
7864         struct bnxt *bp;
7865
7866         if (!dev)
7867                 return;
7868
7869         rtnl_lock();
7870         bp = netdev_priv(dev);
7871         if (!bp)
7872                 goto shutdown_exit;
7873
7874         if (netif_running(dev))
7875                 dev_close(dev);
7876
7877         if (system_state == SYSTEM_POWER_OFF) {
7878                 bnxt_ulp_shutdown(bp);
7879                 bnxt_clear_int_mode(bp);
7880                 pci_wake_from_d3(pdev, bp->wol);
7881                 pci_set_power_state(pdev, PCI_D3hot);
7882         }
7883
7884 shutdown_exit:
7885         rtnl_unlock();
7886 }
7887
7888 #ifdef CONFIG_PM_SLEEP
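/* System suspend detaches and closes the netdev and unregisters the driver
 * from the firmware; resume re-establishes the HWRM channel, resets the
 * function and reopens the device.
 */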
7889 static int bnxt_suspend(struct device *device)
7890 {
7891         struct pci_dev *pdev = to_pci_dev(device);
7892         struct net_device *dev = pci_get_drvdata(pdev);
7893         struct bnxt *bp = netdev_priv(dev);
7894         int rc = 0;
7895
7896         rtnl_lock();
7897         if (netif_running(dev)) {
7898                 netif_device_detach(dev);
7899                 rc = bnxt_close(dev);
7900         }
7901         bnxt_hwrm_func_drv_unrgtr(bp);
7902         rtnl_unlock();
7903         return rc;
7904 }
7905
7906 static int bnxt_resume(struct device *device)
7907 {
7908         struct pci_dev *pdev = to_pci_dev(device);
7909         struct net_device *dev = pci_get_drvdata(pdev);
7910         struct bnxt *bp = netdev_priv(dev);
7911         int rc = 0;
7912
7913         rtnl_lock();
7914         if (bnxt_hwrm_ver_get(bp) || bnxt_hwrm_func_drv_rgtr(bp)) {
7915                 rc = -ENODEV;
7916                 goto resume_exit;
7917         }
7918         rc = bnxt_hwrm_func_reset(bp);
7919         if (rc) {
7920                 rc = -EBUSY;
7921                 goto resume_exit;
7922         }
7923         bnxt_get_wol_settings(bp);
7924         if (netif_running(dev)) {
7925                 rc = bnxt_open(dev);
7926                 if (!rc)
7927                         netif_device_attach(dev);
7928         }
7929
7930 resume_exit:
7931         rtnl_unlock();
7932         return rc;
7933 }
7934
7935 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
7936 #define BNXT_PM_OPS (&bnxt_pm_ops)
7937
7938 #else
7939
7940 #define BNXT_PM_OPS NULL
7941
7942 #endif /* CONFIG_PM_SLEEP */
7943
7944 /**
7945  * bnxt_io_error_detected - called when PCI error is detected
7946  * @pdev: Pointer to PCI device
7947  * @state: The current pci connection state
7948  *
7949  * This function is called after a PCI bus error affecting
7950  * this device has been detected.
7951  */
7952 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
7953                                                pci_channel_state_t state)
7954 {
7955         struct net_device *netdev = pci_get_drvdata(pdev);
7956         struct bnxt *bp = netdev_priv(netdev);
7957
7958         netdev_info(netdev, "PCI I/O error detected\n");
7959
7960         rtnl_lock();
7961         netif_device_detach(netdev);
7962
7963         bnxt_ulp_stop(bp);
7964
7965         if (state == pci_channel_io_perm_failure) {
7966                 rtnl_unlock();
7967                 return PCI_ERS_RESULT_DISCONNECT;
7968         }
7969
7970         if (netif_running(netdev))
7971                 bnxt_close(netdev);
7972
7973         pci_disable_device(pdev);
7974         rtnl_unlock();
7975
7976         /* Request a slot reset. */
7977         return PCI_ERS_RESULT_NEED_RESET;
7978 }
7979
7980 /**
7981  * bnxt_io_slot_reset - called after the pci bus has been reset.
7982  * @pdev: Pointer to PCI device
7983  *
7984  * Restart the card from scratch, as if from a cold-boot.
7985  * At this point, the card has experienced a hard reset,
7986  * followed by fixups by BIOS, and has its config space
7987  * set up identically to what it was at cold boot.
7988  */
7989 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
7990 {
7991         struct net_device *netdev = pci_get_drvdata(pdev);
7992         struct bnxt *bp = netdev_priv(netdev);
7993         int err = 0;
7994         pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
7995
7996         netdev_info(bp->dev, "PCI Slot Reset\n");
7997
7998         rtnl_lock();
7999
8000         if (pci_enable_device(pdev)) {
8001                 dev_err(&pdev->dev,
8002                         "Cannot re-enable PCI device after reset.\n");
8003         } else {
8004                 pci_set_master(pdev);
8005
8006                 err = bnxt_hwrm_func_reset(bp);
8007                 if (!err && netif_running(netdev))
8008                         err = bnxt_open(netdev);
8009
8010                 if (!err) {
8011                         result = PCI_ERS_RESULT_RECOVERED;
8012                         bnxt_ulp_start(bp);
8013                 }
8014         }
8015
8016         if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
8017                 dev_close(netdev);
8018
8019         rtnl_unlock();
8020
8021         err = pci_cleanup_aer_uncorrect_error_status(pdev);
8022         if (err) {
8023                 dev_err(&pdev->dev,
8024                         "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
8025                          err); /* non-fatal, continue */
8026         }
8027
8028         return result;
8029 }
8030
8031 /**
8032  * bnxt_io_resume - called when traffic can start flowing again.
8033  * @pdev: Pointer to PCI device
8034  *
8035  * This callback is called when the error recovery driver tells
8036  * us that it's OK to resume normal operation.
8037  */
8038 static void bnxt_io_resume(struct pci_dev *pdev)
8039 {
8040         struct net_device *netdev = pci_get_drvdata(pdev);
8041
8042         rtnl_lock();
8043
8044         netif_device_attach(netdev);
8045
8046         rtnl_unlock();
8047 }
8048
8049 static const struct pci_error_handlers bnxt_err_handler = {
8050         .error_detected = bnxt_io_error_detected,
8051         .slot_reset     = bnxt_io_slot_reset,
8052         .resume         = bnxt_io_resume
8053 };
8054
8055 static struct pci_driver bnxt_pci_driver = {
8056         .name           = DRV_MODULE_NAME,
8057         .id_table       = bnxt_pci_tbl,
8058         .probe          = bnxt_init_one,
8059         .remove         = bnxt_remove_one,
8060         .shutdown       = bnxt_shutdown,
8061         .driver.pm      = BNXT_PM_OPS,
8062         .err_handler    = &bnxt_err_handler,
8063 #if defined(CONFIG_BNXT_SRIOV)
8064         .sriov_configure = bnxt_sriov_configure,
8065 #endif
8066 };
8067
8068 module_pci_driver(bnxt_pci_driver);