1 /* bnx2x_main.c: QLogic Everest network driver.
2  *
3  * Copyright (c) 2007-2013 Broadcom Corporation
4  * Copyright (c) 2014 QLogic Corporation
5  * All rights reserved
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License as published by
9  * the Free Software Foundation.
10  *
11  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
12  * Written by: Eliezer Tamir
13  * Based on code from Michael Chan's bnx2 driver
14  * UDP CSUM errata workaround by Arik Gendelman
15  * Slowpath and fastpath rework by Vladislav Zolotarov
16  * Statistics and Link management by Yitchak Gertner
17  *
18  */
19
20 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
22 #include <linux/module.h>
23 #include <linux/moduleparam.h>
24 #include <linux/kernel.h>
25 #include <linux/device.h>  /* for dev_info() */
26 #include <linux/timer.h>
27 #include <linux/errno.h>
28 #include <linux/ioport.h>
29 #include <linux/slab.h>
30 #include <linux/interrupt.h>
31 #include <linux/pci.h>
32 #include <linux/aer.h>
33 #include <linux/init.h>
34 #include <linux/netdevice.h>
35 #include <linux/etherdevice.h>
36 #include <linux/skbuff.h>
37 #include <linux/dma-mapping.h>
38 #include <linux/bitops.h>
39 #include <linux/irq.h>
40 #include <linux/delay.h>
41 #include <asm/byteorder.h>
42 #include <linux/time.h>
43 #include <linux/ethtool.h>
44 #include <linux/mii.h>
45 #include <linux/if_vlan.h>
46 #include <linux/crash_dump.h>
47 #include <net/ip.h>
48 #include <net/ipv6.h>
49 #include <net/tcp.h>
50 #include <net/vxlan.h>
51 #include <net/checksum.h>
52 #include <net/ip6_checksum.h>
53 #include <linux/workqueue.h>
54 #include <linux/crc32.h>
55 #include <linux/crc32c.h>
56 #include <linux/prefetch.h>
57 #include <linux/zlib.h>
58 #include <linux/io.h>
59 #include <linux/semaphore.h>
60 #include <linux/stringify.h>
61 #include <linux/vmalloc.h>
62 #if IS_ENABLED(CONFIG_BNX2X_GENEVE)
63 #include <net/geneve.h>
64 #endif
65 #include "bnx2x.h"
66 #include "bnx2x_init.h"
67 #include "bnx2x_init_ops.h"
68 #include "bnx2x_cmn.h"
69 #include "bnx2x_vfpf.h"
70 #include "bnx2x_dcb.h"
71 #include "bnx2x_sp.h"
72 #include <linux/firmware.h>
73 #include "bnx2x_fw_file_hdr.h"
74 /* FW files */
75 #define FW_FILE_VERSION                                 \
76         __stringify(BCM_5710_FW_MAJOR_VERSION) "."      \
77         __stringify(BCM_5710_FW_MINOR_VERSION) "."      \
78         __stringify(BCM_5710_FW_REVISION_VERSION) "."   \
79         __stringify(BCM_5710_FW_ENGINEERING_VERSION)
80 #define FW_FILE_NAME_E1         "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
81 #define FW_FILE_NAME_E1H        "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
82 #define FW_FILE_NAME_E2         "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
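/* For illustration only (the version macros come from the firmware header,
 * not this file): if BCM_5710_FW_{MAJOR,MINOR,REVISION,ENGINEERING}_VERSION
 * expanded to 7, 13, 1 and 0, FW_FILE_VERSION would stringify to "7.13.1.0"
 * and FW_FILE_NAME_E2 would become "bnx2x/bnx2x-e2-7.13.1.0.fw", the name
 * the driver later requests via request_firmware().
 */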
83
84 /* Time in jiffies before concluding the transmitter is hung */
85 #define TX_TIMEOUT              (5*HZ)
86
87 static char version[] =
88         "QLogic 5771x/578xx 10/20-Gigabit Ethernet Driver "
89         DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
90
91 MODULE_AUTHOR("Eliezer Tamir");
92 MODULE_DESCRIPTION("QLogic "
93                    "BCM57710/57711/57711E/"
94                    "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
95                    "57840/57840_MF Driver");
96 MODULE_LICENSE("GPL");
97 MODULE_VERSION(DRV_MODULE_VERSION);
98 MODULE_FIRMWARE(FW_FILE_NAME_E1);
99 MODULE_FIRMWARE(FW_FILE_NAME_E1H);
100 MODULE_FIRMWARE(FW_FILE_NAME_E2);
101
102 int bnx2x_num_queues;
103 module_param_named(num_queues, bnx2x_num_queues, int, S_IRUGO);
104 MODULE_PARM_DESC(num_queues,
105                  " Set number of queues (default is the number of CPUs)");
106
107 static int disable_tpa;
108 module_param(disable_tpa, int, S_IRUGO);
109 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
110
111 static int int_mode;
112 module_param(int_mode, int, S_IRUGO);
113 MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
114                                 "(1 INT#x; 2 MSI)");
115
116 static int dropless_fc;
117 module_param(dropless_fc, int, S_IRUGO);
118 MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
119
120 static int mrrs = -1;
121 module_param(mrrs, int, S_IRUGO);
122 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
123
124 static int debug;
125 module_param(debug, int, S_IRUGO);
126 MODULE_PARM_DESC(debug, " Default debug msglevel");
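/* Usage sketch (illustrative, not from the original source): these are all
 * module parameters, so a load that forces MSI with a fixed queue count
 * would look like "modprobe bnx2x num_queues=4 int_mode=2", and
 * "disable_tpa=1" turns off the TPA (LRO) aggregation.  All of them are
 * registered read-only (S_IRUGO), so they must be set at load time.
 */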
127
128 static struct workqueue_struct *bnx2x_wq;
129 struct workqueue_struct *bnx2x_iov_wq;
130
131 struct bnx2x_mac_vals {
132         u32 xmac_addr;
133         u32 xmac_val;
134         u32 emac_addr;
135         u32 emac_val;
136         u32 umac_addr[2];
137         u32 umac_val[2];
138         u32 bmac_addr;
139         u32 bmac_val[2];
140 };
141
142 enum bnx2x_board_type {
143         BCM57710 = 0,
144         BCM57711,
145         BCM57711E,
146         BCM57712,
147         BCM57712_MF,
148         BCM57712_VF,
149         BCM57800,
150         BCM57800_MF,
151         BCM57800_VF,
152         BCM57810,
153         BCM57810_MF,
154         BCM57810_VF,
155         BCM57840_4_10,
156         BCM57840_2_20,
157         BCM57840_MF,
158         BCM57840_VF,
159         BCM57811,
160         BCM57811_MF,
161         BCM57840_O,
162         BCM57840_MFO,
163         BCM57811_VF
164 };
165
166 /* indexed by board_type, above */
167 static struct {
168         char *name;
169 } board_info[] = {
170         [BCM57710]      = { "QLogic BCM57710 10 Gigabit PCIe [Everest]" },
171         [BCM57711]      = { "QLogic BCM57711 10 Gigabit PCIe" },
172         [BCM57711E]     = { "QLogic BCM57711E 10 Gigabit PCIe" },
173         [BCM57712]      = { "QLogic BCM57712 10 Gigabit Ethernet" },
174         [BCM57712_MF]   = { "QLogic BCM57712 10 Gigabit Ethernet Multi Function" },
175         [BCM57712_VF]   = { "QLogic BCM57712 10 Gigabit Ethernet Virtual Function" },
176         [BCM57800]      = { "QLogic BCM57800 10 Gigabit Ethernet" },
177         [BCM57800_MF]   = { "QLogic BCM57800 10 Gigabit Ethernet Multi Function" },
178         [BCM57800_VF]   = { "QLogic BCM57800 10 Gigabit Ethernet Virtual Function" },
179         [BCM57810]      = { "QLogic BCM57810 10 Gigabit Ethernet" },
180         [BCM57810_MF]   = { "QLogic BCM57810 10 Gigabit Ethernet Multi Function" },
181         [BCM57810_VF]   = { "QLogic BCM57810 10 Gigabit Ethernet Virtual Function" },
182         [BCM57840_4_10] = { "QLogic BCM57840 10 Gigabit Ethernet" },
183         [BCM57840_2_20] = { "QLogic BCM57840 20 Gigabit Ethernet" },
184         [BCM57840_MF]   = { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
185         [BCM57840_VF]   = { "QLogic BCM57840 10/20 Gigabit Ethernet Virtual Function" },
186         [BCM57811]      = { "QLogic BCM57811 10 Gigabit Ethernet" },
187         [BCM57811_MF]   = { "QLogic BCM57811 10 Gigabit Ethernet Multi Function" },
188         [BCM57840_O]    = { "QLogic BCM57840 10/20 Gigabit Ethernet" },
189         [BCM57840_MFO]  = { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
190         [BCM57811_VF]   = { "QLogic BCM57811 10 Gigabit Ethernet Virtual Function" }
191 };
192
193 #ifndef PCI_DEVICE_ID_NX2_57710
194 #define PCI_DEVICE_ID_NX2_57710         CHIP_NUM_57710
195 #endif
196 #ifndef PCI_DEVICE_ID_NX2_57711
197 #define PCI_DEVICE_ID_NX2_57711         CHIP_NUM_57711
198 #endif
199 #ifndef PCI_DEVICE_ID_NX2_57711E
200 #define PCI_DEVICE_ID_NX2_57711E        CHIP_NUM_57711E
201 #endif
202 #ifndef PCI_DEVICE_ID_NX2_57712
203 #define PCI_DEVICE_ID_NX2_57712         CHIP_NUM_57712
204 #endif
205 #ifndef PCI_DEVICE_ID_NX2_57712_MF
206 #define PCI_DEVICE_ID_NX2_57712_MF      CHIP_NUM_57712_MF
207 #endif
208 #ifndef PCI_DEVICE_ID_NX2_57712_VF
209 #define PCI_DEVICE_ID_NX2_57712_VF      CHIP_NUM_57712_VF
210 #endif
211 #ifndef PCI_DEVICE_ID_NX2_57800
212 #define PCI_DEVICE_ID_NX2_57800         CHIP_NUM_57800
213 #endif
214 #ifndef PCI_DEVICE_ID_NX2_57800_MF
215 #define PCI_DEVICE_ID_NX2_57800_MF      CHIP_NUM_57800_MF
216 #endif
217 #ifndef PCI_DEVICE_ID_NX2_57800_VF
218 #define PCI_DEVICE_ID_NX2_57800_VF      CHIP_NUM_57800_VF
219 #endif
220 #ifndef PCI_DEVICE_ID_NX2_57810
221 #define PCI_DEVICE_ID_NX2_57810         CHIP_NUM_57810
222 #endif
223 #ifndef PCI_DEVICE_ID_NX2_57810_MF
224 #define PCI_DEVICE_ID_NX2_57810_MF      CHIP_NUM_57810_MF
225 #endif
226 #ifndef PCI_DEVICE_ID_NX2_57840_O
227 #define PCI_DEVICE_ID_NX2_57840_O       CHIP_NUM_57840_OBSOLETE
228 #endif
229 #ifndef PCI_DEVICE_ID_NX2_57810_VF
230 #define PCI_DEVICE_ID_NX2_57810_VF      CHIP_NUM_57810_VF
231 #endif
232 #ifndef PCI_DEVICE_ID_NX2_57840_4_10
233 #define PCI_DEVICE_ID_NX2_57840_4_10    CHIP_NUM_57840_4_10
234 #endif
235 #ifndef PCI_DEVICE_ID_NX2_57840_2_20
236 #define PCI_DEVICE_ID_NX2_57840_2_20    CHIP_NUM_57840_2_20
237 #endif
238 #ifndef PCI_DEVICE_ID_NX2_57840_MFO
239 #define PCI_DEVICE_ID_NX2_57840_MFO     CHIP_NUM_57840_MF_OBSOLETE
240 #endif
241 #ifndef PCI_DEVICE_ID_NX2_57840_MF
242 #define PCI_DEVICE_ID_NX2_57840_MF      CHIP_NUM_57840_MF
243 #endif
244 #ifndef PCI_DEVICE_ID_NX2_57840_VF
245 #define PCI_DEVICE_ID_NX2_57840_VF      CHIP_NUM_57840_VF
246 #endif
247 #ifndef PCI_DEVICE_ID_NX2_57811
248 #define PCI_DEVICE_ID_NX2_57811         CHIP_NUM_57811
249 #endif
250 #ifndef PCI_DEVICE_ID_NX2_57811_MF
251 #define PCI_DEVICE_ID_NX2_57811_MF      CHIP_NUM_57811_MF
252 #endif
253 #ifndef PCI_DEVICE_ID_NX2_57811_VF
254 #define PCI_DEVICE_ID_NX2_57811_VF      CHIP_NUM_57811_VF
255 #endif
256
257 static const struct pci_device_id bnx2x_pci_tbl[] = {
258         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
259         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
260         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
261         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
262         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
263         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_VF), BCM57712_VF },
264         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
265         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
266         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_VF), BCM57800_VF },
267         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
268         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
269         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
270         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
271         { PCI_VDEVICE(QLOGIC,   PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
272         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
273         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_VF), BCM57810_VF },
274         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
275         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
276         { PCI_VDEVICE(QLOGIC,   PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
277         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
278         { PCI_VDEVICE(QLOGIC,   PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
279         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
280         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
281         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_VF), BCM57811_VF },
282         { 0 }
283 };
284
285 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
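/* Illustrative note: each entry's driver_data above is a bnx2x_board_type
 * index, so when (for example) a BCM57810 PF matches, the probe path can use
 * board_info[BCM57810].name for its log banner.  This is only a sketch of
 * how the two tables relate; the actual lookup happens in the PCI probe
 * routine further down in this file.
 */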
286
287 /* Global resources for unloading a previously loaded device */
288 #define BNX2X_PREV_WAIT_NEEDED 1
289 static DEFINE_SEMAPHORE(bnx2x_prev_sem);
290 static LIST_HEAD(bnx2x_prev_list);
291
292 /* Forward declaration */
293 static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
294 static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp);
295 static int bnx2x_set_storm_rx_mode(struct bnx2x *bp);
296
297 /****************************************************************************
298 * General service functions
299 ****************************************************************************/
300
301 static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr);
302
303 static void __storm_memset_dma_mapping(struct bnx2x *bp,
304                                        u32 addr, dma_addr_t mapping)
305 {
306         REG_WR(bp,  addr, U64_LO(mapping));
307         REG_WR(bp,  addr + 4, U64_HI(mapping));
308 }
309
310 static void storm_memset_spq_addr(struct bnx2x *bp,
311                                   dma_addr_t mapping, u16 abs_fid)
312 {
313         u32 addr = XSEM_REG_FAST_MEMORY +
314                         XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
315
316         __storm_memset_dma_mapping(bp, addr, mapping);
317 }
318
319 static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
320                                   u16 pf_id)
321 {
322         REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
323                 pf_id);
324         REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
325                 pf_id);
326         REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
327                 pf_id);
328         REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
329                 pf_id);
330 }
331
332 static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
333                                  u8 enable)
334 {
335         REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
336                 enable);
337         REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
338                 enable);
339         REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
340                 enable);
341         REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
342                 enable);
343 }
344
345 static void storm_memset_eq_data(struct bnx2x *bp,
346                                  struct event_ring_data *eq_data,
347                                 u16 pfid)
348 {
349         size_t size = sizeof(struct event_ring_data);
350
351         u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);
352
353         __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
354 }
355
356 static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
357                                  u16 pfid)
358 {
359         u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
360         REG_WR16(bp, addr, eq_prod);
361 }
362
363 /* used only at init
364  * locking is done by mcp
365  */
366 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
367 {
368         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
369         pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
370         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
371                                PCICFG_VENDOR_ID_OFFSET);
372 }
373
374 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
375 {
376         u32 val;
377
378         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
379         pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
380         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
381                                PCICFG_VENDOR_ID_OFFSET);
382
383         return val;
384 }
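/* Sketch (not part of the original driver): the two helpers above implement
 * a classic address/data "window" -- the GRC offset is written to
 * PCICFG_GRC_ADDRESS, the payload moves through PCICFG_GRC_DATA, and the
 * window is then parked back on PCICFG_VENDOR_ID_OFFSET.  A read-modify-write
 * through the window would look roughly like:
 *
 *	val = bnx2x_reg_rd_ind(bp, some_grc_reg);
 *	bnx2x_reg_wr_ind(bp, some_grc_reg, val | some_bit);
 *
 * where some_grc_reg and some_bit are placeholders, not real register names.
 */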
385
386 #define DMAE_DP_SRC_GRC         "grc src_addr [%08x]"
387 #define DMAE_DP_SRC_PCI         "pci src_addr [%x:%08x]"
388 #define DMAE_DP_DST_GRC         "grc dst_addr [%08x]"
389 #define DMAE_DP_DST_PCI         "pci dst_addr [%x:%08x]"
390 #define DMAE_DP_DST_NONE        "dst_addr [none]"
391
392 static void bnx2x_dp_dmae(struct bnx2x *bp,
393                           struct dmae_command *dmae, int msglvl)
394 {
395         u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
396         int i;
397
398         switch (dmae->opcode & DMAE_COMMAND_DST) {
399         case DMAE_CMD_DST_PCI:
400                 if (src_type == DMAE_CMD_SRC_PCI)
401                         DP(msglvl, "DMAE: opcode 0x%08x\n"
402                            "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
403                            "comp_addr [%x:%08x], comp_val 0x%08x\n",
404                            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
405                            dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
406                            dmae->comp_addr_hi, dmae->comp_addr_lo,
407                            dmae->comp_val);
408                 else
409                         DP(msglvl, "DMAE: opcode 0x%08x\n"
410                            "src [%08x], len [%d*4], dst [%x:%08x]\n"
411                            "comp_addr [%x:%08x], comp_val 0x%08x\n",
412                            dmae->opcode, dmae->src_addr_lo >> 2,
413                            dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
414                            dmae->comp_addr_hi, dmae->comp_addr_lo,
415                            dmae->comp_val);
416                 break;
417         case DMAE_CMD_DST_GRC:
418                 if (src_type == DMAE_CMD_SRC_PCI)
419                         DP(msglvl, "DMAE: opcode 0x%08x\n"
420                            "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
421                            "comp_addr [%x:%08x], comp_val 0x%08x\n",
422                            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
423                            dmae->len, dmae->dst_addr_lo >> 2,
424                            dmae->comp_addr_hi, dmae->comp_addr_lo,
425                            dmae->comp_val);
426                 else
427                         DP(msglvl, "DMAE: opcode 0x%08x\n"
428                            "src [%08x], len [%d*4], dst [%08x]\n"
429                            "comp_addr [%x:%08x], comp_val 0x%08x\n",
430                            dmae->opcode, dmae->src_addr_lo >> 2,
431                            dmae->len, dmae->dst_addr_lo >> 2,
432                            dmae->comp_addr_hi, dmae->comp_addr_lo,
433                            dmae->comp_val);
434                 break;
435         default:
436                 if (src_type == DMAE_CMD_SRC_PCI)
437                         DP(msglvl, "DMAE: opcode 0x%08x\n"
438                            "src_addr [%x:%08x]  len [%d * 4]  dst_addr [none]\n"
439                            "comp_addr [%x:%08x]  comp_val 0x%08x\n",
440                            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
441                            dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
442                            dmae->comp_val);
443                 else
444                         DP(msglvl, "DMAE: opcode 0x%08x\n"
445                            "src_addr [%08x]  len [%d * 4]  dst_addr [none]\n"
446                            "comp_addr [%x:%08x]  comp_val 0x%08x\n",
447                            dmae->opcode, dmae->src_addr_lo >> 2,
448                            dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
449                            dmae->comp_val);
450                 break;
451         }
452
453         for (i = 0; i < (sizeof(struct dmae_command)/4); i++)
454                 DP(msglvl, "DMAE RAW [%02d]: 0x%08x\n",
455                    i, *(((u32 *)dmae) + i));
456 }
457
458 /* copy command into DMAE command memory and set DMAE command go */
459 void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
460 {
461         u32 cmd_offset;
462         int i;
463
464         cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
465         for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
466                 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
467         }
468         REG_WR(bp, dmae_reg_go_c[idx], 1);
469 }
470
471 u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
472 {
473         return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
474                            DMAE_CMD_C_ENABLE);
475 }
476
477 u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
478 {
479         return opcode & ~DMAE_CMD_SRC_RESET;
480 }
481
482 u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
483                              bool with_comp, u8 comp_type)
484 {
485         u32 opcode = 0;
486
487         opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
488                    (dst_type << DMAE_COMMAND_DST_SHIFT));
489
490         opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
491
492         opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
493         opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
494                    (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
495         opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
496
497 #ifdef __BIG_ENDIAN
498         opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
499 #else
500         opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
501 #endif
502         if (with_comp)
503                 opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
504         return opcode;
505 }
506
507 void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
508                                       struct dmae_command *dmae,
509                                       u8 src_type, u8 dst_type)
510 {
511         memset(dmae, 0, sizeof(struct dmae_command));
512
513         /* set the opcode */
514         dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
515                                          true, DMAE_COMP_PCI);
516
517         /* fill in the completion parameters */
518         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
519         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
520         dmae->comp_val = DMAE_COMP_VAL;
521 }
522
523 /* issue a dmae command over the init-channel and wait for completion */
524 int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
525                                u32 *comp)
526 {
527         int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
528         int rc = 0;
529
530         bnx2x_dp_dmae(bp, dmae, BNX2X_MSG_DMAE);
531
532         /* Lock the dmae channel. Disable BHs to prevent a dead-lock
533          * as long as this code is called both from syscall context and
534          * from ndo_set_rx_mode() flow that may be called from BH.
535          */
536
537         spin_lock_bh(&bp->dmae_lock);
538
539         /* reset completion */
540         *comp = 0;
541
542         /* post the command on the channel used for initializations */
543         bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
544
545         /* wait for completion */
546         udelay(5);
547         while ((*comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
548
549                 if (!cnt ||
550                     (bp->recovery_state != BNX2X_RECOVERY_DONE &&
551                      bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
552                         BNX2X_ERR("DMAE timeout!\n");
553                         rc = DMAE_TIMEOUT;
554                         goto unlock;
555                 }
556                 cnt--;
557                 udelay(50);
558         }
559         if (*comp & DMAE_PCI_ERR_FLAG) {
560                 BNX2X_ERR("DMAE PCI error!\n");
561                 rc = DMAE_PCI_ERROR;
562         }
563
564 unlock:
565
566         spin_unlock_bh(&bp->dmae_lock);
567
568         return rc;
569 }
570
571 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
572                       u32 len32)
573 {
574         int rc;
575         struct dmae_command dmae;
576
577         if (!bp->dmae_ready) {
578                 u32 *data = bnx2x_sp(bp, wb_data[0]);
579
580                 if (CHIP_IS_E1(bp))
581                         bnx2x_init_ind_wr(bp, dst_addr, data, len32);
582                 else
583                         bnx2x_init_str_wr(bp, dst_addr, data, len32);
584                 return;
585         }
586
587         /* set opcode and fixed command fields */
588         bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
589
590         /* fill in addresses and len */
591         dmae.src_addr_lo = U64_LO(dma_addr);
592         dmae.src_addr_hi = U64_HI(dma_addr);
593         dmae.dst_addr_lo = dst_addr >> 2;
594         dmae.dst_addr_hi = 0;
595         dmae.len = len32;
596
597         /* issue the command and wait for completion */
598         rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
599         if (rc) {
600                 BNX2X_ERR("DMAE returned failure %d\n", rc);
601 #ifdef BNX2X_STOP_ON_ERROR
602                 bnx2x_panic();
603 #endif
604         }
605 }
606
607 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
608 {
609         int rc;
610         struct dmae_command dmae;
611
612         if (!bp->dmae_ready) {
613                 u32 *data = bnx2x_sp(bp, wb_data[0]);
614                 int i;
615
616                 if (CHIP_IS_E1(bp))
617                         for (i = 0; i < len32; i++)
618                                 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
619                 else
620                         for (i = 0; i < len32; i++)
621                                 data[i] = REG_RD(bp, src_addr + i*4);
622
623                 return;
624         }
625
626         /* set opcode and fixed command fields */
627         bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
628
629         /* fill in addresses and len */
630         dmae.src_addr_lo = src_addr >> 2;
631         dmae.src_addr_hi = 0;
632         dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
633         dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
634         dmae.len = len32;
635
636         /* issue the command and wait for completion */
637         rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
638         if (rc) {
639                 BNX2X_ERR("DMAE returned failure %d\n", rc);
640 #ifdef BNX2X_STOP_ON_ERROR
641                 bnx2x_panic();
642 #endif
643         }
644 }
645
646 static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
647                                       u32 addr, u32 len)
648 {
649         int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
650         int offset = 0;
651
652         while (len > dmae_wr_max) {
653                 bnx2x_write_dmae(bp, phys_addr + offset,
654                                  addr + offset, dmae_wr_max);
655                 offset += dmae_wr_max * 4;
656                 len -= dmae_wr_max;
657         }
658
659         bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
660 }
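/* Worked example (illustrative values): bnx2x_write_dmae() takes its length
 * in dwords, so with a hypothetical dmae_wr_max of 0x400 dwords and
 * len = 0x900, the loop above issues two full 0x400-dword writes and the
 * final call covers the remaining 0x100 dwords.  Note that 'offset' advances
 * in bytes (dmae_wr_max * 4) because phys_addr and addr are byte addresses,
 * while 'len' is decremented in dwords.
 */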
661
662 enum storms {
663            XSTORM,
664            TSTORM,
665            CSTORM,
666            USTORM,
667            MAX_STORMS
668 };
669
670 #define STORMS_NUM 4
671 #define REGS_IN_ENTRY 4
672
673 static inline int bnx2x_get_assert_list_entry(struct bnx2x *bp,
674                                               enum storms storm,
675                                               int entry)
676 {
677         switch (storm) {
678         case XSTORM:
679                 return XSTORM_ASSERT_LIST_OFFSET(entry);
680         case TSTORM:
681                 return TSTORM_ASSERT_LIST_OFFSET(entry);
682         case CSTORM:
683                 return CSTORM_ASSERT_LIST_OFFSET(entry);
684         case USTORM:
685                 return USTORM_ASSERT_LIST_OFFSET(entry);
686         case MAX_STORMS:
687         default:
688                 BNX2X_ERR("unknown storm\n");
689         }
690         return -EINVAL;
691 }
692
693 static int bnx2x_mc_assert(struct bnx2x *bp)
694 {
695         char last_idx;
696         int i, j, rc = 0;
697         enum storms storm;
698         u32 regs[REGS_IN_ENTRY];
699         u32 bar_storm_intmem[STORMS_NUM] = {
700                 BAR_XSTRORM_INTMEM,
701                 BAR_TSTRORM_INTMEM,
702                 BAR_CSTRORM_INTMEM,
703                 BAR_USTRORM_INTMEM
704         };
705         u32 storm_assert_list_index[STORMS_NUM] = {
706                 XSTORM_ASSERT_LIST_INDEX_OFFSET,
707                 TSTORM_ASSERT_LIST_INDEX_OFFSET,
708                 CSTORM_ASSERT_LIST_INDEX_OFFSET,
709                 USTORM_ASSERT_LIST_INDEX_OFFSET
710         };
711         char *storms_string[STORMS_NUM] = {
712                 "XSTORM",
713                 "TSTORM",
714                 "CSTORM",
715                 "USTORM"
716         };
717
718         for (storm = XSTORM; storm < MAX_STORMS; storm++) {
719                 last_idx = REG_RD8(bp, bar_storm_intmem[storm] +
720                                    storm_assert_list_index[storm]);
721                 if (last_idx)
722                         BNX2X_ERR("%s_ASSERT_LIST_INDEX 0x%x\n",
723                                   storms_string[storm], last_idx);
724
725                 /* print the asserts */
726                 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
727                         /* read a single assert entry */
728                         for (j = 0; j < REGS_IN_ENTRY; j++)
729                                 regs[j] = REG_RD(bp, bar_storm_intmem[storm] +
730                                           bnx2x_get_assert_list_entry(bp,
731                                                                       storm,
732                                                                       i) +
733                                           sizeof(u32) * j);
734
735                         /* log entry if it contains a valid assert */
736                         if (regs[0] != COMMON_ASM_INVALID_ASSERT_OPCODE) {
737                                 BNX2X_ERR("%s_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
738                                           storms_string[storm], i, regs[3],
739                                           regs[2], regs[1], regs[0]);
740                                 rc++;
741                         } else {
742                                 break;
743                         }
744                 }
745         }
746
747         BNX2X_ERR("Chip Revision: %s, FW Version: %d_%d_%d\n",
748                   CHIP_IS_E1(bp) ? "everest1" :
749                   CHIP_IS_E1H(bp) ? "everest1h" :
750                   CHIP_IS_E2(bp) ? "everest2" : "everest3",
751                   BCM_5710_FW_MAJOR_VERSION,
752                   BCM_5710_FW_MINOR_VERSION,
753                   BCM_5710_FW_REVISION_VERSION);
754
755         return rc;
756 }
757
758 #define MCPR_TRACE_BUFFER_SIZE  (0x800)
759 #define SCRATCH_BUFFER_SIZE(bp) \
760         (CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ? 0x20000 : 0x28000))
761
762 void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
763 {
764         u32 addr, val;
765         u32 mark, offset;
766         __be32 data[9];
767         int word;
768         u32 trace_shmem_base;
769         if (BP_NOMCP(bp)) {
770                 BNX2X_ERR("NO MCP - can not dump\n");
771                 return;
772         }
773         netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
774                 (bp->common.bc_ver & 0xff0000) >> 16,
775                 (bp->common.bc_ver & 0xff00) >> 8,
776                 (bp->common.bc_ver & 0xff));
777
778         val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
779         if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
780                 BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);
781
782         if (BP_PATH(bp) == 0)
783                 trace_shmem_base = bp->common.shmem_base;
784         else
785                 trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
786
787         /* sanity */
788         if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE ||
789             trace_shmem_base >= MCPR_SCRATCH_BASE(bp) +
790                                 SCRATCH_BUFFER_SIZE(bp)) {
791                 BNX2X_ERR("Unable to dump trace buffer (mark %x)\n",
792                           trace_shmem_base);
793                 return;
794         }
795
796         addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE;
797
798         /* validate TRCB signature */
799         mark = REG_RD(bp, addr);
800         if (mark != MFW_TRACE_SIGNATURE) {
801                 BNX2X_ERR("Trace buffer signature is missing.");
802                 return ;
803         }
804
805         /* read cyclic buffer pointer */
806         addr += 4;
807         mark = REG_RD(bp, addr);
808         mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000;
809         if (mark >= trace_shmem_base || mark < addr + 4) {
810                 BNX2X_ERR("Mark doesn't fall inside Trace Buffer\n");
811                 return;
812         }
813         printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);
814
815         printk("%s", lvl);
816
817         /* dump buffer after the mark */
818         for (offset = mark; offset < trace_shmem_base; offset += 0x8*4) {
819                 for (word = 0; word < 8; word++)
820                         data[word] = htonl(REG_RD(bp, offset + 4*word));
821                 data[8] = 0x0;
822                 pr_cont("%s", (char *)data);
823         }
824
825         /* dump buffer before the mark */
826         for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
827                 for (word = 0; word < 8; word++)
828                         data[word] = htonl(REG_RD(bp, offset + 4*word));
829                 data[8] = 0x0;
830                 pr_cont("%s", (char *)data);
831         }
832         printk("%s" "end of fw dump\n", lvl);
833 }
834
835 static void bnx2x_fw_dump(struct bnx2x *bp)
836 {
837         bnx2x_fw_dump_lvl(bp, KERN_ERR);
838 }
839
840 static void bnx2x_hc_int_disable(struct bnx2x *bp)
841 {
842         int port = BP_PORT(bp);
843         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
844         u32 val = REG_RD(bp, addr);
845
846         /* in E1 we must use only PCI configuration space to disable
847          * MSI/MSIX capability
848          * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC block
849          */
850         if (CHIP_IS_E1(bp)) {
851                 /* Since IGU_PF_CONF_MSI_MSIX_EN is still always on,
852                  * use the mask register to prevent the HC from sending
853                  * interrupts after we exit the function
854                  */
855                 REG_WR(bp, HC_REG_INT_MASK + port*4, 0);
856
857                 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
858                          HC_CONFIG_0_REG_INT_LINE_EN_0 |
859                          HC_CONFIG_0_REG_ATTN_BIT_EN_0);
860         } else
861                 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
862                          HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
863                          HC_CONFIG_0_REG_INT_LINE_EN_0 |
864                          HC_CONFIG_0_REG_ATTN_BIT_EN_0);
865
866         DP(NETIF_MSG_IFDOWN,
867            "write %x to HC %d (addr 0x%x)\n",
868            val, port, addr);
869
870         /* flush all outstanding writes */
871         mmiowb();
872
873         REG_WR(bp, addr, val);
874         if (REG_RD(bp, addr) != val)
875                 BNX2X_ERR("BUG! Proper val not read from IGU!\n");
876 }
877
878 static void bnx2x_igu_int_disable(struct bnx2x *bp)
879 {
880         u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
881
882         val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
883                  IGU_PF_CONF_INT_LINE_EN |
884                  IGU_PF_CONF_ATTN_BIT_EN);
885
886         DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);
887
888         /* flush all outstanding writes */
889         mmiowb();
890
891         REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
892         if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
893                 BNX2X_ERR("BUG! Proper val not read from IGU!\n");
894 }
895
896 static void bnx2x_int_disable(struct bnx2x *bp)
897 {
898         if (bp->common.int_block == INT_BLOCK_HC)
899                 bnx2x_hc_int_disable(bp);
900         else
901                 bnx2x_igu_int_disable(bp);
902 }
903
904 void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
905 {
906         int i;
907         u16 j;
908         struct hc_sp_status_block_data sp_sb_data;
909         int func = BP_FUNC(bp);
910 #ifdef BNX2X_STOP_ON_ERROR
911         u16 start = 0, end = 0;
912         u8 cos;
913 #endif
914         if (IS_PF(bp) && disable_int)
915                 bnx2x_int_disable(bp);
916
917         bp->stats_state = STATS_STATE_DISABLED;
918         bp->eth_stats.unrecoverable_error++;
919         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
920
921         BNX2X_ERR("begin crash dump -----------------\n");
922
923         /* Indices */
924         /* Common */
925         if (IS_PF(bp)) {
926                 struct host_sp_status_block *def_sb = bp->def_status_blk;
927                 int data_size, cstorm_offset;
928
929                 BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)  spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
930                           bp->def_idx, bp->def_att_idx, bp->attn_state,
931                           bp->spq_prod_idx, bp->stats_counter);
932                 BNX2X_ERR("DSB: attn bits(0x%x)  ack(0x%x)  id(0x%x)  idx(0x%x)\n",
933                           def_sb->atten_status_block.attn_bits,
934                           def_sb->atten_status_block.attn_bits_ack,
935                           def_sb->atten_status_block.status_block_id,
936                           def_sb->atten_status_block.attn_bits_index);
937                 BNX2X_ERR("     def (");
938                 for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
939                         pr_cont("0x%x%s",
940                                 def_sb->sp_sb.index_values[i],
941                                 (i == HC_SP_SB_MAX_INDICES - 1) ? ")  " : " ");
942
943                 data_size = sizeof(struct hc_sp_status_block_data) /
944                             sizeof(u32);
945                 cstorm_offset = CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func);
946                 for (i = 0; i < data_size; i++)
947                         *((u32 *)&sp_sb_data + i) =
948                                 REG_RD(bp, BAR_CSTRORM_INTMEM + cstorm_offset +
949                                            i * sizeof(u32));
950
951                 pr_cont("igu_sb_id(0x%x)  igu_seg_id(0x%x) pf_id(0x%x)  vnic_id(0x%x)  vf_id(0x%x)  vf_valid (0x%x) state(0x%x)\n",
952                         sp_sb_data.igu_sb_id,
953                         sp_sb_data.igu_seg_id,
954                         sp_sb_data.p_func.pf_id,
955                         sp_sb_data.p_func.vnic_id,
956                         sp_sb_data.p_func.vf_id,
957                         sp_sb_data.p_func.vf_valid,
958                         sp_sb_data.state);
959         }
960
961         for_each_eth_queue(bp, i) {
962                 struct bnx2x_fastpath *fp = &bp->fp[i];
963                 int loop;
964                 struct hc_status_block_data_e2 sb_data_e2;
965                 struct hc_status_block_data_e1x sb_data_e1x;
966                 struct hc_status_block_sm  *hc_sm_p =
967                         CHIP_IS_E1x(bp) ?
968                         sb_data_e1x.common.state_machine :
969                         sb_data_e2.common.state_machine;
970                 struct hc_index_data *hc_index_p =
971                         CHIP_IS_E1x(bp) ?
972                         sb_data_e1x.index_data :
973                         sb_data_e2.index_data;
974                 u8 data_size, cos;
975                 u32 *sb_data_p;
976                 struct bnx2x_fp_txdata txdata;
977
978                 if (!bp->fp)
979                         break;
980
981                 if (!fp->rx_cons_sb)
982                         continue;
983
984                 /* Rx */
985                 BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)  rx_comp_prod(0x%x)  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
986                           i, fp->rx_bd_prod, fp->rx_bd_cons,
987                           fp->rx_comp_prod,
988                           fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
989                 BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)  fp_hc_idx(0x%x)\n",
990                           fp->rx_sge_prod, fp->last_max_sge,
991                           le16_to_cpu(fp->fp_hc_idx));
992
993                 /* Tx */
994                 for_each_cos_in_tx_queue(fp, cos)
995                 {
996                         if (!fp->txdata_ptr[cos])
997                                 break;
998
999                         txdata = *fp->txdata_ptr[cos];
1000
1001                         if (!txdata.tx_cons_sb)
1002                                 continue;
1003
1004                         BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)  *tx_cons_sb(0x%x)\n",
1005                                   i, txdata.tx_pkt_prod,
1006                                   txdata.tx_pkt_cons, txdata.tx_bd_prod,
1007                                   txdata.tx_bd_cons,
1008                                   le16_to_cpu(*txdata.tx_cons_sb));
1009                 }
1010
1011                 loop = CHIP_IS_E1x(bp) ?
1012                         HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;
1013
1014                 /* host sb data */
1015
1016                 if (IS_FCOE_FP(fp))
1017                         continue;
1018
1019                 BNX2X_ERR("     run indexes (");
1020                 for (j = 0; j < HC_SB_MAX_SM; j++)
1021                         pr_cont("0x%x%s",
1022                                fp->sb_running_index[j],
1023                                (j == HC_SB_MAX_SM - 1) ? ")" : " ");
1024
1025                 BNX2X_ERR("     indexes (");
1026                 for (j = 0; j < loop; j++)
1027                         pr_cont("0x%x%s",
1028                                fp->sb_index_values[j],
1029                                (j == loop - 1) ? ")" : " ");
1030
1031                 /* VF cannot access FW reflection for status block */
1032                 if (IS_VF(bp))
1033                         continue;
1034
1035                 /* fw sb data */
1036                 data_size = CHIP_IS_E1x(bp) ?
1037                         sizeof(struct hc_status_block_data_e1x) :
1038                         sizeof(struct hc_status_block_data_e2);
1039                 data_size /= sizeof(u32);
1040                 sb_data_p = CHIP_IS_E1x(bp) ?
1041                         (u32 *)&sb_data_e1x :
1042                         (u32 *)&sb_data_e2;
1043                 /* copy sb data in here */
1044                 for (j = 0; j < data_size; j++)
1045                         *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
1046                                 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
1047                                 j * sizeof(u32));
1048
1049                 if (!CHIP_IS_E1x(bp)) {
1050                         pr_cont("pf_id(0x%x)  vf_id(0x%x)  vf_valid(0x%x) vnic_id(0x%x)  same_igu_sb_1b(0x%x) state(0x%x)\n",
1051                                 sb_data_e2.common.p_func.pf_id,
1052                                 sb_data_e2.common.p_func.vf_id,
1053                                 sb_data_e2.common.p_func.vf_valid,
1054                                 sb_data_e2.common.p_func.vnic_id,
1055                                 sb_data_e2.common.same_igu_sb_1b,
1056                                 sb_data_e2.common.state);
1057                 } else {
1058                         pr_cont("pf_id(0x%x)  vf_id(0x%x)  vf_valid(0x%x) vnic_id(0x%x)  same_igu_sb_1b(0x%x) state(0x%x)\n",
1059                                 sb_data_e1x.common.p_func.pf_id,
1060                                 sb_data_e1x.common.p_func.vf_id,
1061                                 sb_data_e1x.common.p_func.vf_valid,
1062                                 sb_data_e1x.common.p_func.vnic_id,
1063                                 sb_data_e1x.common.same_igu_sb_1b,
1064                                 sb_data_e1x.common.state);
1065                 }
1066
1067                 /* SB_SMs data */
1068                 for (j = 0; j < HC_SB_MAX_SM; j++) {
1069                         pr_cont("SM[%d] __flags (0x%x) igu_sb_id (0x%x)  igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n",
1070                                 j, hc_sm_p[j].__flags,
1071                                 hc_sm_p[j].igu_sb_id,
1072                                 hc_sm_p[j].igu_seg_id,
1073                                 hc_sm_p[j].time_to_expire,
1074                                 hc_sm_p[j].timer_value);
1075                 }
1076
1077                 /* Indices data */
1078                 for (j = 0; j < loop; j++) {
1079                         pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j,
1080                                hc_index_p[j].flags,
1081                                hc_index_p[j].timeout);
1082                 }
1083         }
1084
1085 #ifdef BNX2X_STOP_ON_ERROR
1086         if (IS_PF(bp)) {
1087                 /* event queue */
1088                 BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
1089                 for (i = 0; i < NUM_EQ_DESC; i++) {
1090                         u32 *data = (u32 *)&bp->eq_ring[i].message.data;
1091
1092                         BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
1093                                   i, bp->eq_ring[i].message.opcode,
1094                                   bp->eq_ring[i].message.error);
1095                         BNX2X_ERR("data: %x %x %x\n",
1096                                   data[0], data[1], data[2]);
1097                 }
1098         }
1099
1100         /* Rings */
1101         /* Rx */
1102         for_each_valid_rx_queue(bp, i) {
1103                 struct bnx2x_fastpath *fp = &bp->fp[i];
1104
1105                 if (!bp->fp)
1106                         break;
1107
1108                 if (!fp->rx_cons_sb)
1109                         continue;
1110
1111                 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
1112                 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
1113                 for (j = start; j != end; j = RX_BD(j + 1)) {
1114                         u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
1115                         struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
1116
1117                         BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
1118                                   i, j, rx_bd[1], rx_bd[0], sw_bd->data);
1119                 }
1120
1121                 start = RX_SGE(fp->rx_sge_prod);
1122                 end = RX_SGE(fp->last_max_sge);
1123                 for (j = start; j != end; j = RX_SGE(j + 1)) {
1124                         u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
1125                         struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
1126
1127                         BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
1128                                   i, j, rx_sge[1], rx_sge[0], sw_page->page);
1129                 }
1130
1131                 start = RCQ_BD(fp->rx_comp_cons - 10);
1132                 end = RCQ_BD(fp->rx_comp_cons + 503);
1133                 for (j = start; j != end; j = RCQ_BD(j + 1)) {
1134                         u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
1135
1136                         BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
1137                                   i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
1138                 }
1139         }
1140
1141         /* Tx */
1142         for_each_valid_tx_queue(bp, i) {
1143                 struct bnx2x_fastpath *fp = &bp->fp[i];
1144
1145                 if (!bp->fp)
1146                         break;
1147
1148                 for_each_cos_in_tx_queue(fp, cos) {
1149                         struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1150
1151                         if (!fp->txdata_ptr[cos])
1152                                 break;
1153
1154                         if (!txdata->tx_cons_sb)
1155                                 continue;
1156
1157                         start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
1158                         end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
1159                         for (j = start; j != end; j = TX_BD(j + 1)) {
1160                                 struct sw_tx_bd *sw_bd =
1161                                         &txdata->tx_buf_ring[j];
1162
1163                                 BNX2X_ERR("fp%d: txdata %d, packet[%x]=[%p,%x]\n",
1164                                           i, cos, j, sw_bd->skb,
1165                                           sw_bd->first_bd);
1166                         }
1167
1168                         start = TX_BD(txdata->tx_bd_cons - 10);
1169                         end = TX_BD(txdata->tx_bd_cons + 254);
1170                         for (j = start; j != end; j = TX_BD(j + 1)) {
1171                                 u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];
1172
1173                                 BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n",
1174                                           i, cos, j, tx_bd[0], tx_bd[1],
1175                                           tx_bd[2], tx_bd[3]);
1176                         }
1177                 }
1178         }
1179 #endif
1180         if (IS_PF(bp)) {
1181                 bnx2x_fw_dump(bp);
1182                 bnx2x_mc_assert(bp);
1183         }
1184         BNX2X_ERR("end crash dump -----------------\n");
1185 }
1186
1187 /*
1188  * FLR Support for E2
1189  *
1190  * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW
1191  * initialization.
1192  */
1193 #define FLR_WAIT_USEC           10000   /* 10 milliseconds */
1194 #define FLR_WAIT_INTERVAL       50      /* usec */
1195 #define FLR_POLL_CNT            (FLR_WAIT_USEC/FLR_WAIT_INTERVAL) /* 200 */
1196
1197 struct pbf_pN_buf_regs {
1198         int pN;
1199         u32 init_crd;
1200         u32 crd;
1201         u32 crd_freed;
1202 };
1203
1204 struct pbf_pN_cmd_regs {
1205         int pN;
1206         u32 lines_occup;
1207         u32 lines_freed;
1208 };
1209
1210 static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
1211                                      struct pbf_pN_buf_regs *regs,
1212                                      u32 poll_count)
1213 {
1214         u32 init_crd, crd, crd_start, crd_freed, crd_freed_start;
1215         u32 cur_cnt = poll_count;
1216
1217         crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed);
1218         crd = crd_start = REG_RD(bp, regs->crd);
1219         init_crd = REG_RD(bp, regs->init_crd);
1220
1221         DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
1222         DP(BNX2X_MSG_SP, "CREDIT[%d]      : s:%x\n", regs->pN, crd);
1223         DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
1224
1225         while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
1226                (init_crd - crd_start))) {
1227                 if (cur_cnt--) {
1228                         udelay(FLR_WAIT_INTERVAL);
1229                         crd = REG_RD(bp, regs->crd);
1230                         crd_freed = REG_RD(bp, regs->crd_freed);
1231                 } else {
1232                         DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n",
1233                            regs->pN);
1234                         DP(BNX2X_MSG_SP, "CREDIT[%d]      : c:%x\n",
1235                            regs->pN, crd);
1236                         DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n",
1237                            regs->pN, crd_freed);
1238                         break;
1239                 }
1240         }
1241         DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
1242            poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
1243 }
1244
1245 static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
1246                                      struct pbf_pN_cmd_regs *regs,
1247                                      u32 poll_count)
1248 {
1249         u32 occup, to_free, freed, freed_start;
1250         u32 cur_cnt = poll_count;
1251
1252         occup = to_free = REG_RD(bp, regs->lines_occup);
1253         freed = freed_start = REG_RD(bp, regs->lines_freed);
1254
1255         DP(BNX2X_MSG_SP, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
1256         DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
1257
1258         while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
1259                 if (cur_cnt--) {
1260                         udelay(FLR_WAIT_INTERVAL);
1261                         occup = REG_RD(bp, regs->lines_occup);
1262                         freed = REG_RD(bp, regs->lines_freed);
1263                 } else {
1264                         DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n",
1265                            regs->pN);
1266                         DP(BNX2X_MSG_SP, "OCCUPANCY[%d]   : s:%x\n",
1267                            regs->pN, occup);
1268                         DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n",
1269                            regs->pN, freed);
1270                         break;
1271                 }
1272         }
1273         DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
1274            poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
1275 }
1276
1277 static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
1278                                     u32 expected, u32 poll_count)
1279 {
1280         u32 cur_cnt = poll_count;
1281         u32 val;
1282
1283         while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
1284                 udelay(FLR_WAIT_INTERVAL);
1285
1286         return val;
1287 }
1288
1289 int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
1290                                     char *msg, u32 poll_cnt)
1291 {
1292         u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
1293         if (val != 0) {
1294                 BNX2X_ERR("%s usage count=%d\n", msg, val);
1295                 return 1;
1296         }
1297         return 0;
1298 }
1299
1300 /* Common routines with VF FLR cleanup */
1301 u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
1302 {
1303         /* adjust polling timeout */
1304         if (CHIP_REV_IS_EMUL(bp))
1305                 return FLR_POLL_CNT * 2000;
1306
1307         if (CHIP_REV_IS_FPGA(bp))
1308                 return FLR_POLL_CNT * 120;
1309
1310         return FLR_POLL_CNT;
1311 }
1312
1313 void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
1314 {
1315         struct pbf_pN_cmd_regs cmd_regs[] = {
1316                 {0, (CHIP_IS_E3B0(bp)) ?
1317                         PBF_REG_TQ_OCCUPANCY_Q0 :
1318                         PBF_REG_P0_TQ_OCCUPANCY,
1319                     (CHIP_IS_E3B0(bp)) ?
1320                         PBF_REG_TQ_LINES_FREED_CNT_Q0 :
1321                         PBF_REG_P0_TQ_LINES_FREED_CNT},
1322                 {1, (CHIP_IS_E3B0(bp)) ?
1323                         PBF_REG_TQ_OCCUPANCY_Q1 :
1324                         PBF_REG_P1_TQ_OCCUPANCY,
1325                     (CHIP_IS_E3B0(bp)) ?
1326                         PBF_REG_TQ_LINES_FREED_CNT_Q1 :
1327                         PBF_REG_P1_TQ_LINES_FREED_CNT},
1328                 {4, (CHIP_IS_E3B0(bp)) ?
1329                         PBF_REG_TQ_OCCUPANCY_LB_Q :
1330                         PBF_REG_P4_TQ_OCCUPANCY,
1331                     (CHIP_IS_E3B0(bp)) ?
1332                         PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
1333                         PBF_REG_P4_TQ_LINES_FREED_CNT}
1334         };
1335
1336         struct pbf_pN_buf_regs buf_regs[] = {
1337                 {0, (CHIP_IS_E3B0(bp)) ?
1338                         PBF_REG_INIT_CRD_Q0 :
1339                         PBF_REG_P0_INIT_CRD ,
1340                     (CHIP_IS_E3B0(bp)) ?
1341                         PBF_REG_CREDIT_Q0 :
1342                         PBF_REG_P0_CREDIT,
1343                     (CHIP_IS_E3B0(bp)) ?
1344                         PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
1345                         PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
1346                 {1, (CHIP_IS_E3B0(bp)) ?
1347                         PBF_REG_INIT_CRD_Q1 :
1348                         PBF_REG_P1_INIT_CRD,
1349                     (CHIP_IS_E3B0(bp)) ?
1350                         PBF_REG_CREDIT_Q1 :
1351                         PBF_REG_P1_CREDIT,
1352                     (CHIP_IS_E3B0(bp)) ?
1353                         PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
1354                         PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
1355                 {4, (CHIP_IS_E3B0(bp)) ?
1356                         PBF_REG_INIT_CRD_LB_Q :
1357                         PBF_REG_P4_INIT_CRD,
1358                     (CHIP_IS_E3B0(bp)) ?
1359                         PBF_REG_CREDIT_LB_Q :
1360                         PBF_REG_P4_CREDIT,
1361                     (CHIP_IS_E3B0(bp)) ?
1362                         PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
1363                         PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
1364         };
1365
1366         int i;
1367
1368         /* Verify the command queues are flushed P0, P1, P4 */
1369         for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
1370                 bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);
1371
1372         /* Verify the transmission buffers are flushed P0, P1, P4 */
1373         for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
1374                 bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
1375 }
1376
1377 #define OP_GEN_PARAM(param) \
1378         (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
1379
1380 #define OP_GEN_TYPE(type) \
1381         (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
1382
1383 #define OP_GEN_AGG_VECT(index) \
1384         (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
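/* The OP_GEN_* macros above each place a value into its field of the XSDM
 * operation-generator command; bnx2x_send_final_clnup() ORs them together,
 * plus the aggregated-vector-valid bit, to build the FW final-cleanup command.
 */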
1385
1386 int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
1387 {
1388         u32 op_gen_command = 0;
1389         u32 comp_addr = BAR_CSTRORM_INTMEM +
1390                         CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
1391         int ret = 0;
1392
1393         if (REG_RD(bp, comp_addr)) {
1394                 BNX2X_ERR("Cleanup complete was not 0 before sending\n");
1395                 return 1;
1396         }
1397
1398         op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
1399         op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
1400         op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
1401         op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
1402
1403         DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
1404         REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen_command);
1405
1406         if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
1407                 BNX2X_ERR("FW final cleanup did not succeed\n");
1408                 DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n",
1409                    (REG_RD(bp, comp_addr)));
1410                 bnx2x_panic();
1411                 return 1;
1412         }
1413         /* Zero completion for next FLR */
1414         REG_WR(bp, comp_addr, 0);
1415
1416         return ret;
1417 }
1418
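/* Returns non-zero while the device still reports PCIe transactions pending
 * (PCI_EXP_DEVSTA_TRPND in the Device Status register).
 */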
1419 u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
1420 {
1421         u16 status;
1422
1423         pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
1424         return status & PCI_EXP_DEVSTA_TRPND;
1425 }
1426
1427 /* PF FLR specific routines */
1428
1429 static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
1430 {
1431         /* wait for CFC PF usage-counter to zero (includes all the VFs) */
1432         if (bnx2x_flr_clnup_poll_hw_counter(bp,
1433                         CFC_REG_NUM_LCIDS_INSIDE_PF,
1434                         "CFC PF usage counter timed out",
1435                         poll_cnt))
1436                 return 1;
1437
1438         /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
1439         if (bnx2x_flr_clnup_poll_hw_counter(bp,
1440                         DORQ_REG_PF_USAGE_CNT,
1441                         "DQ PF usage counter timed out",
1442                         poll_cnt))
1443                 return 1;
1444
1445         /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
1446         if (bnx2x_flr_clnup_poll_hw_counter(bp,
1447                         QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp),
1448                         "QM PF usage counter timed out",
1449                         poll_cnt))
1450                 return 1;
1451
1452         /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
1453         if (bnx2x_flr_clnup_poll_hw_counter(bp,
1454                         TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp),
1455                         "Timers VNIC usage counter timed out",
1456                         poll_cnt))
1457                 return 1;
1458         if (bnx2x_flr_clnup_poll_hw_counter(bp,
1459                         TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp),
1460                         "Timers NUM_SCANS usage counter timed out",
1461                         poll_cnt))
1462                 return 1;
1463
1464         /* Wait DMAE PF usage counter to zero */
1465         if (bnx2x_flr_clnup_poll_hw_counter(bp,
1466                         dmae_reg_go_c[INIT_DMAE_C(bp)],
1467                         "DMAE command register timed out",
1468                         poll_cnt))
1469                 return 1;
1470
1471         return 0;
1472 }
1473
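/* Dump the PF enable/disable related registers (CFC, PBF, IGU, PGLUE_B)
 * to the debug log; used while debugging the FLR cleanup flow.
 */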
1474 static void bnx2x_hw_enable_status(struct bnx2x *bp)
1475 {
1476         u32 val;
1477
1478         val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF);
1479         DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
1480
1481         val = REG_RD(bp, PBF_REG_DISABLE_PF);
1482         DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val);
1483
1484         val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN);
1485         DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
1486
1487         val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN);
1488         DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
1489
1490         val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
1491         DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
1492
1493         val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
1494         DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
1495
1496         val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
1497         DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
1498
1499         val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
1500         DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
1501            val);
1502 }
1503
1504 static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
1505 {
1506         u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);
1507
1508         DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp));
1509
1510         /* Re-enable PF target read access */
1511         REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
1512
1513         /* Poll HW usage counters */
1514         DP(BNX2X_MSG_SP, "Polling usage counters\n");
1515         if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
1516                 return -EBUSY;
1517
1518         /* Zero the igu 'trailing edge' and 'leading edge' */
1519
1520         /* Send the FW cleanup command */
1521         if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt))
1522                 return -EBUSY;
1523
1524         /* ATC cleanup */
1525
1526         /* Verify TX hw is flushed */
1527         bnx2x_tx_hw_flushed(bp, poll_cnt);
1528
1529         /* Wait 100ms (not adjusted according to platform) */
1530         msleep(100);
1531
1532         /* Verify no pending pci transactions */
1533         if (bnx2x_is_pcie_pending(bp->pdev))
1534                 BNX2X_ERR("PCIE Transactions still pending\n");
1535
1536         /* Debug */
1537         bnx2x_hw_enable_status(bp);
1538
1539         /*
1540          * Master enable - Due to WB DMAE writes performed before this
1541          * register is re-initialized as part of the regular function init
1542          */
1543         REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
1544
1545         return 0;
1546 }
1547
1548 static void bnx2x_hc_int_enable(struct bnx2x *bp)
1549 {
1550         int port = BP_PORT(bp);
1551         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
1552         u32 val = REG_RD(bp, addr);
1553         bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
1554         bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
1555         bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
1556
1557         if (msix) {
1558                 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1559                          HC_CONFIG_0_REG_INT_LINE_EN_0);
1560                 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1561                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1562                 if (single_msix)
1563                         val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
1564         } else if (msi) {
1565                 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
1566                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1567                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1568                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1569         } else {
1570                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1571                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1572                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
1573                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1574
1575                 if (!CHIP_IS_E1(bp)) {
1576                         DP(NETIF_MSG_IFUP,
1577                            "write %x to HC %d (addr 0x%x)\n", val, port, addr);
1578
1579                         REG_WR(bp, addr, val);
1580
1581                         val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
1582                 }
1583         }
1584
1585         if (CHIP_IS_E1(bp))
1586                 REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);
1587
1588         DP(NETIF_MSG_IFUP,
1589            "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr,
1590            (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1591
1592         REG_WR(bp, addr, val);
1593         /*
1594          * Ensure that HC_CONFIG is written before leading/trailing edge config
1595          */
1596         mmiowb();
1597         barrier();
1598
1599         if (!CHIP_IS_E1(bp)) {
1600                 /* init leading/trailing edge */
1601                 if (IS_MF(bp)) {
1602                         val = (0xee0f | (1 << (BP_VN(bp) + 4)));
1603                         if (bp->port.pmf)
1604                                 /* enable nig and gpio3 attention */
1605                                 val |= 0x1100;
1606                 } else
1607                         val = 0xffff;
1608
1609                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1610                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
1611         }
1612
1613         /* Make sure that interrupts are indeed enabled from here on */
1614         mmiowb();
1615 }
1616
1617 static void bnx2x_igu_int_enable(struct bnx2x *bp)
1618 {
1619         u32 val;
1620         bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
1621         bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
1622         bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
1623
1624         val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1625
1626         if (msix) {
1627                 val &= ~(IGU_PF_CONF_INT_LINE_EN |
1628                          IGU_PF_CONF_SINGLE_ISR_EN);
1629                 val |= (IGU_PF_CONF_MSI_MSIX_EN |
1630                         IGU_PF_CONF_ATTN_BIT_EN);
1631
1632                 if (single_msix)
1633                         val |= IGU_PF_CONF_SINGLE_ISR_EN;
1634         } else if (msi) {
1635                 val &= ~IGU_PF_CONF_INT_LINE_EN;
1636                 val |= (IGU_PF_CONF_MSI_MSIX_EN |
1637                         IGU_PF_CONF_ATTN_BIT_EN |
1638                         IGU_PF_CONF_SINGLE_ISR_EN);
1639         } else {
1640                 val &= ~IGU_PF_CONF_MSI_MSIX_EN;
1641                 val |= (IGU_PF_CONF_INT_LINE_EN |
1642                         IGU_PF_CONF_ATTN_BIT_EN |
1643                         IGU_PF_CONF_SINGLE_ISR_EN);
1644         }
1645
1646         /* Clean previous status - need to configure igu prior to ack */
1647         if ((!msix) || single_msix) {
1648                 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1649                 bnx2x_ack_int(bp);
1650         }
1651
1652         val |= IGU_PF_CONF_FUNC_EN;
1653
1654         DP(NETIF_MSG_IFUP, "write 0x%x to IGU  mode %s\n",
1655            val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1656
1657         REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1658
1659         if (val & IGU_PF_CONF_INT_LINE_EN)
1660                 pci_intx(bp->pdev, true);
1661
1662         barrier();
1663
1664         /* init leading/trailing edge */
1665         if (IS_MF(bp)) {
1666                 val = (0xee0f | (1 << (BP_VN(bp) + 4)));
1667                 if (bp->port.pmf)
1668                         /* enable nig and gpio3 attention */
1669                         val |= 0x1100;
1670         } else
1671                 val = 0xffff;
1672
1673         REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
1674         REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
1675
1676         /* Make sure that interrupts are indeed enabled from here on */
1677         mmiowb();
1678 }
1679
1680 void bnx2x_int_enable(struct bnx2x *bp)
1681 {
1682         if (bp->common.int_block == INT_BLOCK_HC)
1683                 bnx2x_hc_int_enable(bp);
1684         else
1685                 bnx2x_igu_int_enable(bp);
1686 }
1687
1688 void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
1689 {
1690         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1691         int i, offset;
1692
1693         if (disable_hw)
1694                 /* prevent the HW from sending interrupts */
1695                 bnx2x_int_disable(bp);
1696
1697         /* make sure all ISRs are done */
1698         if (msix) {
1699                 synchronize_irq(bp->msix_table[0].vector);
1700                 offset = 1;
1701                 if (CNIC_SUPPORT(bp))
1702                         offset++;
1703                 for_each_eth_queue(bp, i)
1704                         synchronize_irq(bp->msix_table[offset++].vector);
1705         } else
1706                 synchronize_irq(bp->pdev->irq);
1707
1708         /* make sure sp_task is not running */
1709         cancel_delayed_work(&bp->sp_task);
1710         cancel_delayed_work(&bp->period_task);
1711         flush_workqueue(bnx2x_wq);
1712 }
1713
1714 /* fast path */
1715
1716 /*
1717  * General service functions
1718  */
1719
1720 /* Return true if the lock was acquired successfully */
1721 static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
1722 {
1723         u32 lock_status;
1724         u32 resource_bit = (1 << resource);
1725         int func = BP_FUNC(bp);
1726         u32 hw_lock_control_reg;
1727
1728         DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1729            "Trying to take a lock on resource %d\n", resource);
1730
1731         /* Validating that the resource is within range */
1732         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1733                 DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1734                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1735                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1736                 return false;
1737         }
1738
1739         if (func <= 5)
1740                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1741         else
1742                 hw_lock_control_reg =
1743                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1744
1745         /* Try to acquire the lock */
1746         REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1747         lock_status = REG_RD(bp, hw_lock_control_reg);
1748         if (lock_status & resource_bit)
1749                 return true;
1750
1751         DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1752            "Failed to get a lock on resource %d\n", resource);
1753         return false;
1754 }
1755
1756 /**
1757  * bnx2x_get_leader_lock_resource - get the recovery leader resource id
1758  *
1759  * @bp: driver handle
1760  *
1761  * Returns the recovery leader resource id according to the engine this function
1762  * belongs to. Currently only 2 engines are supported.
1763  */
1764 static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
1765 {
1766         if (BP_PATH(bp))
1767                 return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
1768         else
1769                 return HW_LOCK_RESOURCE_RECOVERY_LEADER_0;
1770 }
1771
1772 /**
1773  * bnx2x_trylock_leader_lock - try to acquire a leader lock.
1774  *
1775  * @bp: driver handle
1776  *
1777  * Tries to acquire a leader lock for current engine.
1778  */
1779 static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
1780 {
1781         return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
1782 }
1783
1784 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
1785
1786 /* schedule the sp task and mark that interrupt occurred (runs from ISR) */
1787 static int bnx2x_schedule_sp_task(struct bnx2x *bp)
1788 {
1789         /* Set the interrupt occurred bit for the sp-task to recognize it
1790          * must ack the interrupt and transition according to the IGU
1791          * state machine.
1792          */
1793         atomic_set(&bp->interrupt_occurred, 1);
1794
1795         /* The sp_task must execute only after this bit
1796          * is set, otherwise we will get out of sync and miss all
1797          * further interrupts. Hence, the barrier.
1798          */
1799         smp_wmb();
1800
1801         /* schedule sp_task to workqueue */
1802         return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1803 }
1804
1805 void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
1806 {
1807         struct bnx2x *bp = fp->bp;
1808         int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1809         int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1810         enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
1811         struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
1812
1813         DP(BNX2X_MSG_SP,
1814            "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
1815            fp->index, cid, command, bp->state,
1816            rr_cqe->ramrod_cqe.ramrod_type);
1817
1818         /* If cid is within VF range, replace the slowpath object with the
1819          * one corresponding to this VF
1820          */
1821         if (cid >= BNX2X_FIRST_VF_CID  &&
1822             cid < BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)
1823                 bnx2x_iov_set_queue_sp_obj(bp, cid, &q_obj);
1824
1825         switch (command) {
1826         case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
1827                 DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
1828                 drv_cmd = BNX2X_Q_CMD_UPDATE;
1829                 break;
1830
1831         case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
1832                 DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid);
1833                 drv_cmd = BNX2X_Q_CMD_SETUP;
1834                 break;
1835
1836         case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
1837                 DP(BNX2X_MSG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
1838                 drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
1839                 break;
1840
1841         case (RAMROD_CMD_ID_ETH_HALT):
1842                 DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid);
1843                 drv_cmd = BNX2X_Q_CMD_HALT;
1844                 break;
1845
1846         case (RAMROD_CMD_ID_ETH_TERMINATE):
1847                 DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid);
1848                 drv_cmd = BNX2X_Q_CMD_TERMINATE;
1849                 break;
1850
1851         case (RAMROD_CMD_ID_ETH_EMPTY):
1852                 DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid);
1853                 drv_cmd = BNX2X_Q_CMD_EMPTY;
1854                 break;
1855
1856         case (RAMROD_CMD_ID_ETH_TPA_UPDATE):
1857                 DP(BNX2X_MSG_SP, "got tpa update ramrod CID=%d\n", cid);
1858                 drv_cmd = BNX2X_Q_CMD_UPDATE_TPA;
1859                 break;
1860
1861         default:
1862                 BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
1863                           command, fp->index);
1864                 return;
1865         }
1866
1867         if ((drv_cmd != BNX2X_Q_CMD_MAX) &&
1868             q_obj->complete_cmd(bp, q_obj, drv_cmd))
1869                 /* q_obj->complete_cmd() failure means that this was
1870                  * an unexpected completion.
1871                  *
1872                  * In this case we don't want to increase bp->cq_spq_left
1873                  * because apparently we haven't sent this command in the
1874                  * first place.
1875                  */
1876 #ifdef BNX2X_STOP_ON_ERROR
1877                 bnx2x_panic();
1878 #else
1879                 return;
1880 #endif
1881
1882         smp_mb__before_atomic();
1883         atomic_inc(&bp->cq_spq_left);
1884         /* push the change in bp->cq_spq_left towards memory */
1885         smp_mb__after_atomic();
1886
1887         DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));
1888
1889         if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) &&
1890             (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) {
1891                 /* If the Q update ramrod completed for the last Q in the
1892                  * AFEX vif set flow, ACK the MCP at the end.
1893                  *
1894                  * Mark the pending-ACK-to-MCP bit before clearing the
1895                  * update-pending bit, so that both bits are never cleared
1896                  * at the same time. At the end of load/unload the driver
1897                  * checks that sp_state is cleared, and this ordering
1898                  * prevents races.
1899                  */
1900                 smp_mb__before_atomic();
1901                 set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
1902                 wmb();
1903                 clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
1904                 smp_mb__after_atomic();
1905
1906                 /* schedule the sp task as mcp ack is required */
1907                 bnx2x_schedule_sp_task(bp);
1908         }
1909
1910         return;
1911 }
1912
1913 irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1914 {
1915         struct bnx2x *bp = netdev_priv(dev_instance);
1916         u16 status = bnx2x_ack_int(bp);
1917         u16 mask;
1918         int i;
1919         u8 cos;
1920
1921         /* Return here if interrupt is shared and it's not for us */
1922         if (unlikely(status == 0)) {
1923                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1924                 return IRQ_NONE;
1925         }
1926         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1927
1928 #ifdef BNX2X_STOP_ON_ERROR
1929         if (unlikely(bp->panic))
1930                 return IRQ_HANDLED;
1931 #endif
1932
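        /* Bit 0 of the ack'ed status is the default (slowpath) status block;
         * each fastpath SB maps to one of the following bits, shifted up by
         * one more when a CNIC status block is present (which takes bit 1).
         */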
1933         for_each_eth_queue(bp, i) {
1934                 struct bnx2x_fastpath *fp = &bp->fp[i];
1935
1936                 mask = 0x2 << (fp->index + CNIC_SUPPORT(bp));
1937                 if (status & mask) {
1938                         /* Handle Rx or Tx according to SB id */
1939                         for_each_cos_in_tx_queue(fp, cos)
1940                                 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1941                         prefetch(&fp->sb_running_index[SM_RX_ID]);
1942                         napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
1943                         status &= ~mask;
1944                 }
1945         }
1946
1947         if (CNIC_SUPPORT(bp)) {
1948                 mask = 0x2;
1949                 if (status & (mask | 0x1)) {
1950                         struct cnic_ops *c_ops = NULL;
1951
1952                         rcu_read_lock();
1953                         c_ops = rcu_dereference(bp->cnic_ops);
1954                         if (c_ops && (bp->cnic_eth_dev.drv_state &
1955                                       CNIC_DRV_STATE_HANDLES_IRQ))
1956                                 c_ops->cnic_handler(bp->cnic_data, NULL);
1957                         rcu_read_unlock();
1958
1959                         status &= ~mask;
1960                 }
1961         }
1962
1963         if (unlikely(status & 0x1)) {
1964
1965                 /* schedule sp task to perform default status block work, ack
1966                  * attentions and enable interrupts.
1967                  */
1968                 bnx2x_schedule_sp_task(bp);
1969
1970                 status &= ~0x1;
1971                 if (!status)
1972                         return IRQ_HANDLED;
1973         }
1974
1975         if (unlikely(status))
1976                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
1977                    status);
1978
1979         return IRQ_HANDLED;
1980 }
1981
1982 /* Link */
1983
1984 /*
1985  * General service functions
1986  */
1987
1988 int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1989 {
1990         u32 lock_status;
1991         u32 resource_bit = (1 << resource);
1992         int func = BP_FUNC(bp);
1993         u32 hw_lock_control_reg;
1994         int cnt;
1995
1996         /* Validating that the resource is within range */
1997         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1998                 BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1999                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
2000                 return -EINVAL;
2001         }
2002
2003         if (func <= 5) {
2004                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
2005         } else {
2006                 hw_lock_control_reg =
2007                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
2008         }
2009
2010         /* Validating that the resource is not already taken */
2011         lock_status = REG_RD(bp, hw_lock_control_reg);
2012         if (lock_status & resource_bit) {
2013                 BNX2X_ERR("lock_status 0x%x  resource_bit 0x%x\n",
2014                    lock_status, resource_bit);
2015                 return -EEXIST;
2016         }
2017
2018         /* Try for 5 seconds, polling every 5 ms */
2019         for (cnt = 0; cnt < 1000; cnt++) {
2020                 /* Try to acquire the lock */
2021                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
2022                 lock_status = REG_RD(bp, hw_lock_control_reg);
2023                 if (lock_status & resource_bit)
2024                         return 0;
2025
2026                 usleep_range(5000, 10000);
2027         }
2028         BNX2X_ERR("Timeout\n");
2029         return -EAGAIN;
2030 }
2031
2032 int bnx2x_release_leader_lock(struct bnx2x *bp)
2033 {
2034         return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
2035 }
2036
2037 int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
2038 {
2039         u32 lock_status;
2040         u32 resource_bit = (1 << resource);
2041         int func = BP_FUNC(bp);
2042         u32 hw_lock_control_reg;
2043
2044         /* Validating that the resource is within range */
2045         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
2046                 BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
2047                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
2048                 return -EINVAL;
2049         }
2050
2051         if (func <= 5) {
2052                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
2053         } else {
2054                 hw_lock_control_reg =
2055                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
2056         }
2057
2058         /* Validating that the resource is currently taken */
2059         lock_status = REG_RD(bp, hw_lock_control_reg);
2060         if (!(lock_status & resource_bit)) {
2061                 BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. Unlock was called but lock wasn't taken!\n",
2062                           lock_status, resource_bit);
2063                 return -EFAULT;
2064         }
2065
2066         REG_WR(bp, hw_lock_control_reg, resource_bit);
2067         return 0;
2068 }
2069
2070 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
2071 {
2072         /* The GPIO should be swapped if swap register is set and active */
2073         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2074                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2075         int gpio_shift = gpio_num +
2076                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2077         u32 gpio_mask = (1 << gpio_shift);
2078         u32 gpio_reg;
2079         int value;
2080
2081         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2082                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2083                 return -EINVAL;
2084         }
2085
2086         /* read GPIO value */
2087         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2088
2089         /* get the requested pin value */
2090         if ((gpio_reg & gpio_mask) == gpio_mask)
2091                 value = 1;
2092         else
2093                 value = 0;
2094
2095         return value;
2096 }
2097
2098 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2099 {
2100         /* The GPIO should be swapped if swap register is set and active */
2101         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2102                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2103         int gpio_shift = gpio_num +
2104                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2105         u32 gpio_mask = (1 << gpio_shift);
2106         u32 gpio_reg;
2107
2108         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2109                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2110                 return -EINVAL;
2111         }
2112
2113         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2114         /* read GPIO and mask except the float bits */
2115         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
2116
2117         switch (mode) {
2118         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2119                 DP(NETIF_MSG_LINK,
2120                    "Set GPIO %d (shift %d) -> output low\n",
2121                    gpio_num, gpio_shift);
2122                 /* clear FLOAT and set CLR */
2123                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2124                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2125                 break;
2126
2127         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2128                 DP(NETIF_MSG_LINK,
2129                    "Set GPIO %d (shift %d) -> output high\n",
2130                    gpio_num, gpio_shift);
2131                 /* clear FLOAT and set SET */
2132                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2133                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2134                 break;
2135
2136         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2137                 DP(NETIF_MSG_LINK,
2138                    "Set GPIO %d (shift %d) -> input\n",
2139                    gpio_num, gpio_shift);
2140                 /* set FLOAT */
2141                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2142                 break;
2143
2144         default:
2145                 break;
2146         }
2147
2148         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2149         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2150
2151         return 0;
2152 }
2153
2154 int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode)
2155 {
2156         u32 gpio_reg = 0;
2157         int rc = 0;
2158
2159         /* Any port swapping should be handled by caller. */
2160
2161         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2162         /* read GPIO and mask except the float bits */
2163         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2164         gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2165         gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
2166         gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
2167
2168         switch (mode) {
2169         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2170                 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins);
2171                 /* set CLR */
2172                 gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
2173                 break;
2174
2175         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2176                 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins);
2177                 /* set SET */
2178                 gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
2179                 break;
2180
2181         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2182                 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins);
2183                 /* set FLOAT */
2184                 gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2185                 break;
2186
2187         default:
2188                 BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode);
2189                 rc = -EINVAL;
2190                 break;
2191         }
2192
2193         if (rc == 0)
2194                 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2195
2196         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2197
2198         return rc;
2199 }
2200
2201 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2202 {
2203         /* The GPIO should be swapped if swap register is set and active */
2204         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2205                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2206         int gpio_shift = gpio_num +
2207                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2208         u32 gpio_mask = (1 << gpio_shift);
2209         u32 gpio_reg;
2210
2211         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2212                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2213                 return -EINVAL;
2214         }
2215
2216         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2217         /* read GPIO int */
2218         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2219
2220         switch (mode) {
2221         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2222                 DP(NETIF_MSG_LINK,
2223                    "Clear GPIO INT %d (shift %d) -> output low\n",
2224                    gpio_num, gpio_shift);
2225                 /* clear SET and set CLR */
2226                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2227                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2228                 break;
2229
2230         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2231                 DP(NETIF_MSG_LINK,
2232                    "Set GPIO INT %d (shift %d) -> output high\n",
2233                    gpio_num, gpio_shift);
2234                 /* clear CLR and set SET */
2235                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2236                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2237                 break;
2238
2239         default:
2240                 break;
2241         }
2242
2243         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2244         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2245
2246         return 0;
2247 }
2248
2249 static int bnx2x_set_spio(struct bnx2x *bp, int spio, u32 mode)
2250 {
2251         u32 spio_reg;
2252
2253         /* Only 2 SPIOs are configurable */
2254         if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
2255                 BNX2X_ERR("Invalid SPIO 0x%x\n", spio);
2256                 return -EINVAL;
2257         }
2258
2259         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2260         /* read SPIO and mask except the float bits */
2261         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
2262
2263         switch (mode) {
2264         case MISC_SPIO_OUTPUT_LOW:
2265                 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output low\n", spio);
2266                 /* clear FLOAT and set CLR */
2267                 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
2268                 spio_reg |=  (spio << MISC_SPIO_CLR_POS);
2269                 break;
2270
2271         case MISC_SPIO_OUTPUT_HIGH:
2272                 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output high\n", spio);
2273                 /* clear FLOAT and set SET */
2274                 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
2275                 spio_reg |=  (spio << MISC_SPIO_SET_POS);
2276                 break;
2277
2278         case MISC_SPIO_INPUT_HI_Z:
2279                 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> input\n", spio);
2280                 /* set FLOAT */
2281                 spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
2282                 break;
2283
2284         default:
2285                 break;
2286         }
2287
2288         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2289         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2290
2291         return 0;
2292 }
2293
2294 void bnx2x_calc_fc_adv(struct bnx2x *bp)
2295 {
2296         u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
2297
2298         bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
2299                                            ADVERTISED_Pause);
2300         switch (bp->link_vars.ieee_fc &
2301                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2302         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2303                 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
2304                                                   ADVERTISED_Pause);
2305                 break;
2306
2307         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2308                 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
2309                 break;
2310
2311         default:
2312                 break;
2313         }
2314 }
2315
2316 static void bnx2x_set_requested_fc(struct bnx2x *bp)
2317 {
2318         /* Initialize link parameters structure variables
2319          * It is recommended to turn off RX FC for jumbo frames
2320          *  for better performance
2321          */
2322         if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000))
2323                 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2324         else
2325                 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2326 }
2327
2328 static void bnx2x_init_dropless_fc(struct bnx2x *bp)
2329 {
2330         u32 pause_enabled = 0;
2331
2332         if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) {
2333                 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2334                         pause_enabled = 1;
2335
2336                 REG_WR(bp, BAR_USTRORM_INTMEM +
2337                            USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)),
2338                        pause_enabled);
2339         }
2340
2341         DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n",
2342            pause_enabled ? "enabled" : "disabled");
2343 }
2344
2345 int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2346 {
2347         int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp);
2348         u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
2349
2350         if (!BP_NOMCP(bp)) {
2351                 bnx2x_set_requested_fc(bp);
2352                 bnx2x_acquire_phy_lock(bp);
2353
2354                 if (load_mode == LOAD_DIAG) {
2355                         struct link_params *lp = &bp->link_params;
2356                         lp->loopback_mode = LOOPBACK_XGXS;
2357                         /* Prefer doing PHY loopback at highest speed */
2358                         if (lp->req_line_speed[cfx_idx] < SPEED_20000) {
2359                                 if (lp->speed_cap_mask[cfx_idx] &
2360                                     PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)
2361                                         lp->req_line_speed[cfx_idx] =
2362                                         SPEED_20000;
2363                                 else if (lp->speed_cap_mask[cfx_idx] &
2364                                             PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
2365                                                 lp->req_line_speed[cfx_idx] =
2366                                                 SPEED_10000;
2367                                 else
2368                                         lp->req_line_speed[cfx_idx] =
2369                                         SPEED_1000;
2370                         }
2371                 }
2372
2373                 if (load_mode == LOAD_LOOPBACK_EXT) {
2374                         struct link_params *lp = &bp->link_params;
2375                         lp->loopback_mode = LOOPBACK_EXT;
2376                 }
2377
2378                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2379
2380                 bnx2x_release_phy_lock(bp);
2381
2382                 bnx2x_init_dropless_fc(bp);
2383
2384                 bnx2x_calc_fc_adv(bp);
2385
2386                 if (bp->link_vars.link_up) {
2387                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2388                         bnx2x_link_report(bp);
2389                 }
2390                 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2391                 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
2392                 return rc;
2393         }
2394         BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2395         return -EINVAL;
2396 }
2397
2398 void bnx2x_link_set(struct bnx2x *bp)
2399 {
2400         if (!BP_NOMCP(bp)) {
2401                 bnx2x_acquire_phy_lock(bp);
2402                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2403                 bnx2x_release_phy_lock(bp);
2404
2405                 bnx2x_init_dropless_fc(bp);
2406
2407                 bnx2x_calc_fc_adv(bp);
2408         } else
2409                 BNX2X_ERR("Bootcode is missing - can not set link\n");
2410 }
2411
2412 static void bnx2x__link_reset(struct bnx2x *bp)
2413 {
2414         if (!BP_NOMCP(bp)) {
2415                 bnx2x_acquire_phy_lock(bp);
2416                 bnx2x_lfa_reset(&bp->link_params, &bp->link_vars);
2417                 bnx2x_release_phy_lock(bp);
2418         } else
2419                 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2420 }
2421
2422 void bnx2x_force_link_reset(struct bnx2x *bp)
2423 {
2424         bnx2x_acquire_phy_lock(bp);
2425         bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2426         bnx2x_release_phy_lock(bp);
2427 }
2428
2429 u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
2430 {
2431         u8 rc = 0;
2432
2433         if (!BP_NOMCP(bp)) {
2434                 bnx2x_acquire_phy_lock(bp);
2435                 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
2436                                      is_serdes);
2437                 bnx2x_release_phy_lock(bp);
2438         } else
2439                 BNX2X_ERR("Bootcode is missing - can not test link\n");
2440
2441         return rc;
2442 }
2443
2444 /* Fill in the per-vnic min rates; needed for further normalizing of the
2445    min_rates.
2446
2447    If ETS is enabled, or all configured min rates are zero, the fairness
2448    algorithm is disabled via input->flags.cmng_enables.
2449
2450    If not all min rates are zero, any zero entries are set to DEF_MIN_RATE.
2451    Hidden vns always get a min rate of zero.
2452  */
2453 static void bnx2x_calc_vn_min(struct bnx2x *bp,
2454                                       struct cmng_init_input *input)
2455 {
2456         int all_zero = 1;
2457         int vn;
2458
2459         for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2460                 u32 vn_cfg = bp->mf_config[vn];
2461                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2462                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2463
2464                 /* Skip hidden vns */
2465                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2466                         vn_min_rate = 0;
2467                 /* If min rate is zero - set it to 1 */
2468                 else if (!vn_min_rate)
2469                         vn_min_rate = DEF_MIN_RATE;
2470                 else
2471                         all_zero = 0;
2472
2473                 input->vnic_min_rate[vn] = vn_min_rate;
2474         }
2475
2476         /* if ETS or all min rates are zeros - disable fairness */
2477         if (BNX2X_IS_ETS_ENABLED(bp)) {
2478                 input->flags.cmng_enables &=
2479                                         ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2480                 DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
2481         } else if (all_zero) {
2482                 input->flags.cmng_enables &=
2483                                         ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2484                 DP(NETIF_MSG_IFUP,
2485                    "All MIN values are zeroes fairness will be disabled\n");
2486         } else
2487                 input->flags.cmng_enables |=
2488                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2489 }
2490
2491 static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
2492                                     struct cmng_init_input *input)
2493 {
2494         u16 vn_max_rate;
2495         u32 vn_cfg = bp->mf_config[vn];
2496
2497         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2498                 vn_max_rate = 0;
2499         else {
2500                 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
2501
2502                 if (IS_MF_PERCENT_BW(bp)) {
2503                         /* maxCfg is a percentage of the link speed */
2504                         vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
2505                 } else /* SD modes */
2506                         /* maxCfg is absolute in 100Mb units */
2507                         vn_max_rate = maxCfg * 100;
2508         }
2509
2510         DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
2511
2512         input->vnic_max_rate[vn] = vn_max_rate;
2513 }
2514
2515 static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2516 {
2517         if (CHIP_REV_IS_SLOW(bp))
2518                 return CMNG_FNS_NONE;
2519         if (IS_MF(bp))
2520                 return CMNG_FNS_MINMAX;
2521
2522         return CMNG_FNS_NONE;
2523 }
2524
2525 void bnx2x_read_mf_cfg(struct bnx2x *bp)
2526 {
2527         int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
2528
2529         if (BP_NOMCP(bp))
2530                 return; /* what should be the default value in this case */
2531
2532         /* For 2 port configuration the absolute function number formula
2533          * is:
2534          *      abs_func = 2 * vn + BP_PORT + BP_PATH
2535          *
2536          *      and there are 4 functions per port
2537          *
2538          * For 4 port configuration it is
2539          *      abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
2540          *
2541          *      and there are 2 functions per port
2542          */
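        /* For example, on a 2-port device (n == 1), vn 1 on port 1, path 0
         * gives abs_func = 2*1 + 1 + 0 = 3.
         */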
2543         for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2544                 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2545
2546                 if (func >= E1H_FUNC_MAX)
2547                         break;
2548
2549                 bp->mf_config[vn] =
2550                         MF_CFG_RD(bp, func_mf_config[func].config);
2551         }
2552         if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
2553                 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
2554                 bp->flags |= MF_FUNC_DIS;
2555         } else {
2556                 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2557                 bp->flags &= ~MF_FUNC_DIS;
2558         }
2559 }
2560
2561 static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2562 {
2563         struct cmng_init_input input;
2564         memset(&input, 0, sizeof(struct cmng_init_input));
2565
2566         input.port_rate = bp->link_vars.line_speed;
2567
2568         if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) {
2569                 int vn;
2570
2571                 /* read mf conf from shmem */
2572                 if (read_cfg)
2573                         bnx2x_read_mf_cfg(bp);
2574
2575                 /* vn_weight_sum and enable fairness if not 0 */
2576                 bnx2x_calc_vn_min(bp, &input);
2577
2578                 /* calculate and set min-max rate for each vn */
2579                 if (bp->port.pmf)
2580                         for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
2581                                 bnx2x_calc_vn_max(bp, vn, &input);
2582
2583                 /* always enable rate shaping and fairness */
2584                 input.flags.cmng_enables |=
2585                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2586
2587                 bnx2x_init_cmng(&input, &bp->cmng);
2588                 return;
2589         }
2590
2591         /* rate shaping and fairness are disabled */
2592         DP(NETIF_MSG_IFUP,
2593            "rate shaping and fairness are disabled\n");
2594 }
2595
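/* Copy the computed cmng configuration (the per-port struct plus the per-vnic
 * rate-shaping and fairness variables) into XSTORM internal memory.
 */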
2596 static void storm_memset_cmng(struct bnx2x *bp,
2597                               struct cmng_init *cmng,
2598                               u8 port)
2599 {
2600         int vn;
2601         size_t size = sizeof(struct cmng_struct_per_port);
2602
2603         u32 addr = BAR_XSTRORM_INTMEM +
2604                         XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
2605
2606         __storm_memset_struct(bp, addr, size, (u32 *)&cmng->port);
2607
2608         for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2609                 int func = func_by_vn(bp, vn);
2610
2611                 addr = BAR_XSTRORM_INTMEM +
2612                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func);
2613                 size = sizeof(struct rate_shaping_vars_per_vn);
2614                 __storm_memset_struct(bp, addr, size,
2615                                       (u32 *)&cmng->vnic.vnic_max_rate[vn]);
2616
2617                 addr = BAR_XSTRORM_INTMEM +
2618                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func);
2619                 size = sizeof(struct fairness_vars_per_vn);
2620                 __storm_memset_struct(bp, addr, size,
2621                                       (u32 *)&cmng->vnic.vnic_min_rate[vn]);
2622         }
2623 }
2624
2625 /* init cmng mode in HW according to local configuration */
2626 void bnx2x_set_local_cmng(struct bnx2x *bp)
2627 {
2628         int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
2629
2630         if (cmng_fns != CMNG_FNS_NONE) {
2631                 bnx2x_cmng_fns_init(bp, false, cmng_fns);
2632                 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2633         } else {
2634                 /* rate shaping and fairness are disabled */
2635                 DP(NETIF_MSG_IFUP,
2636                    "single function mode without fairness\n");
2637         }
2638 }
2639
2640 /* This function is called upon link interrupt */
2641 static void bnx2x_link_attn(struct bnx2x *bp)
2642 {
2643         /* Make sure that we are synced with the current statistics */
2644         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2645
2646         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2647
2648         bnx2x_init_dropless_fc(bp);
2649
2650         if (bp->link_vars.link_up) {
2651
2652                 if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
2653                         struct host_port_stats *pstats;
2654
2655                         pstats = bnx2x_sp(bp, port_stats);
2656                         /* reset old mac stats */
2657                         memset(&(pstats->mac_stx[0]), 0,
2658                                sizeof(struct mac_stx));
2659                 }
2660                 if (bp->state == BNX2X_STATE_OPEN)
2661                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2662         }
2663
2664         if (bp->link_vars.link_up && bp->link_vars.line_speed)
2665                 bnx2x_set_local_cmng(bp);
2666
2667         __bnx2x_link_report(bp);
2668
2669         if (IS_MF(bp))
2670                 bnx2x_link_sync_notify(bp);
2671 }
2672
2673 void bnx2x__link_status_update(struct bnx2x *bp)
2674 {
2675         if (bp->state != BNX2X_STATE_OPEN)
2676                 return;
2677
2678         /* read updated dcb configuration */
2679         if (IS_PF(bp)) {
2680                 bnx2x_dcbx_pmf_update(bp);
2681                 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2682                 if (bp->link_vars.link_up)
2683                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2684                 else
2685                         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2686                 /* indicate link status */
2687                 bnx2x_link_report(bp);
2688
2689         } else { /* VF */
2690                 bp->port.supported[0] |= (SUPPORTED_10baseT_Half |
2691                                           SUPPORTED_10baseT_Full |
2692                                           SUPPORTED_100baseT_Half |
2693                                           SUPPORTED_100baseT_Full |
2694                                           SUPPORTED_1000baseT_Full |
2695                                           SUPPORTED_2500baseX_Full |
2696                                           SUPPORTED_10000baseT_Full |
2697                                           SUPPORTED_TP |
2698                                           SUPPORTED_FIBRE |
2699                                           SUPPORTED_Autoneg |
2700                                           SUPPORTED_Pause |
2701                                           SUPPORTED_Asym_Pause);
2702                 bp->port.advertising[0] = bp->port.supported[0];
2703
2704                 bp->link_params.bp = bp;
2705                 bp->link_params.port = BP_PORT(bp);
2706                 bp->link_params.req_duplex[0] = DUPLEX_FULL;
2707                 bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE;
2708                 bp->link_params.req_line_speed[0] = SPEED_10000;
2709                 bp->link_params.speed_cap_mask[0] = 0x7f0000;
2710                 bp->link_params.switch_cfg = SWITCH_CFG_10G;
2711                 bp->link_vars.mac_type = MAC_TYPE_BMAC;
2712                 bp->link_vars.line_speed = SPEED_10000;
2713                 bp->link_vars.link_status =
2714                         (LINK_STATUS_LINK_UP |
2715                          LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
2716                 bp->link_vars.link_up = 1;
2717                 bp->link_vars.duplex = DUPLEX_FULL;
2718                 bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE;
2719                 __bnx2x_link_report(bp);
2720
2721                 bnx2x_sample_bulletin(bp);
2722
2723                 /* If the bulletin board did not carry a link status update,
2724                  * __bnx2x_link_report will report the current status, but it
2725                  * will NOT duplicate a report that was already made while
2726                  * sampling the bulletin board.
2727                  */
2728                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2729         }
2730 }
2731
2732 static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
2733                                   u16 vlan_val, u8 allowed_prio)
2734 {
2735         struct bnx2x_func_state_params func_params = {NULL};
2736         struct bnx2x_func_afex_update_params *f_update_params =
2737                 &func_params.params.afex_update;
2738
2739         func_params.f_obj = &bp->func_obj;
2740         func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE;
2741
2742         /* no need to wait for RAMROD completion, so don't
2743          * set RAMROD_COMP_WAIT flag
2744          */
2745
2746         f_update_params->vif_id = vifid;
2747         f_update_params->afex_default_vlan = vlan_val;
2748         f_update_params->allowed_priorities = allowed_prio;
2749
2750         /* if ramrod cannot be sent, respond to MCP immediately */
2751         if (bnx2x_func_state_change(bp, &func_params) < 0)
2752                 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
2753
2754         return 0;
2755 }
2756
2757 static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
2758                                           u16 vif_index, u8 func_bit_map)
2759 {
2760         struct bnx2x_func_state_params func_params = {NULL};
2761         struct bnx2x_func_afex_viflists_params *update_params =
2762                 &func_params.params.afex_viflists;
2763         int rc;
2764         u32 drv_msg_code;
2765
2766         /* validate only LIST_SET and LIST_GET are received from switch */
2767         if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET))
2768                 BNX2X_ERR("BUG! afex_handle_vif_list_cmd invalid type 0x%x\n",
2769                           cmd_type);
2770
2771         func_params.f_obj = &bp->func_obj;
2772         func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS;
2773
2774         /* set parameters according to cmd_type */
2775         update_params->afex_vif_list_command = cmd_type;
2776         update_params->vif_list_index = vif_index;
2777         update_params->func_bit_map =
2778                 (cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
2779         update_params->func_to_clear = 0;
2780         drv_msg_code =
2781                 (cmd_type == VIF_LIST_RULE_GET) ?
2782                 DRV_MSG_CODE_AFEX_LISTGET_ACK :
2783                 DRV_MSG_CODE_AFEX_LISTSET_ACK;
2784
2785         /* if the ramrod cannot be sent, respond to the MCP immediately for
2786          * SET and GET requests (others are not triggered from the MCP)
2787          */
2788         rc = bnx2x_func_state_change(bp, &func_params);
2789         if (rc < 0)
2790                 bnx2x_fw_command(bp, drv_msg_code, 0);
2791
2792         return 0;
2793 }
2794
2795 static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd)
2796 {
2797         struct afex_stats afex_stats;
2798         u32 func = BP_ABS_FUNC(bp);
2799         u32 mf_config;
2800         u16 vlan_val;
2801         u32 vlan_prio;
2802         u16 vif_id;
2803         u8 allowed_prio;
2804         u8 vlan_mode;
2805         u32 addr_to_write, vifid, addrs, stats_type, i;
2806
2807         if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) {
2808                 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2809                 DP(BNX2X_MSG_MCP,
2810                    "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid);
2811                 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0);
2812         }
2813
2814         if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) {
2815                 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2816                 addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]);
2817                 DP(BNX2X_MSG_MCP,
2818                    "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n",
2819                    vifid, addrs);
2820                 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid,
2821                                                addrs);
2822         }
2823
2824         if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) {
2825                 addr_to_write = SHMEM2_RD(bp,
2826                         afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]);
2827                 stats_type = SHMEM2_RD(bp,
2828                         afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2829
2830                 DP(BNX2X_MSG_MCP,
2831                    "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n",
2832                    addr_to_write);
2833
2834                 bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type);
2835
2836                 /* write response to scratchpad, for MCP */
2837                 for (i = 0; i < (sizeof(struct afex_stats)/sizeof(u32)); i++)
2838                         REG_WR(bp, addr_to_write + i*sizeof(u32),
2839                                *(((u32 *)(&afex_stats))+i));
2840
2841                 /* send ack message to MCP */
2842                 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0);
2843         }
2844
2845         if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) {
2846                 mf_config = MF_CFG_RD(bp, func_mf_config[func].config);
2847                 bp->mf_config[BP_VN(bp)] = mf_config;
2848                 DP(BNX2X_MSG_MCP,
2849                    "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n",
2850                    mf_config);
2851
2852                 /* if VIF_SET is "enabled" */
2853                 if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) {
2854                         /* set rate limit directly to internal RAM */
2855                         struct cmng_init_input cmng_input;
2856                         struct rate_shaping_vars_per_vn m_rs_vn;
2857                         size_t size = sizeof(struct rate_shaping_vars_per_vn);
2858                         u32 addr = BAR_XSTRORM_INTMEM +
2859                             XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp));
2860
2861                         bp->mf_config[BP_VN(bp)] = mf_config;
2862
2863                         bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input);
2864                         m_rs_vn.vn_counter.rate =
2865                                 cmng_input.vnic_max_rate[BP_VN(bp)];
2866                         m_rs_vn.vn_counter.quota =
2867                                 (m_rs_vn.vn_counter.rate *
2868                                  RS_PERIODIC_TIMEOUT_USEC) / 8;
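                        /* Illustrative arithmetic, assuming the rate is in
                         * Mbit/s (i.e. bits per microsecond): rate *
                         * RS_PERIODIC_TIMEOUT_USEC is the bit budget for one
                         * rate-shaping period, and dividing by 8 converts it
                         * to bytes. E.g. a hypothetical 1000 Mbit/s limit with
                         * a 400 us period gives 1000 * 400 / 8 = 50000 bytes
                         * per period.
                         */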
2869
2870                         __storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn);
2871
2872                         /* read relevant values from mf_cfg struct in shmem */
2873                         vif_id =
2874                                 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2875                                  FUNC_MF_CFG_E1HOV_TAG_MASK) >>
2876                                 FUNC_MF_CFG_E1HOV_TAG_SHIFT;
2877                         vlan_val =
2878                                 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2879                                  FUNC_MF_CFG_AFEX_VLAN_MASK) >>
2880                                 FUNC_MF_CFG_AFEX_VLAN_SHIFT;
2881                         vlan_prio = (mf_config &
2882                                      FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
2883                                     FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT;
2884                         vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT);
2885                         vlan_mode =
2886                                 (MF_CFG_RD(bp,
2887                                            func_mf_config[func].afex_config) &
2888                                  FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
2889                                 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT;
2890                         allowed_prio =
2891                                 (MF_CFG_RD(bp,
2892                                            func_mf_config[func].afex_config) &
2893                                  FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
2894                                 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT;
2895
2896                         /* send ramrod to FW, return in case of failure */
2897                         if (bnx2x_afex_func_update(bp, vif_id, vlan_val,
2898                                                    allowed_prio))
2899                                 return;
2900
2901                         bp->afex_def_vlan_tag = vlan_val;
2902                         bp->afex_vlan_mode = vlan_mode;
2903                 } else {
2904                         /* notify link down because the function is disabled */
2905                         bnx2x_link_report(bp);
2906
2907                         /* send INVALID VIF ramrod to FW */
2908                         bnx2x_afex_func_update(bp, 0xFFFF, 0, 0);
2909
2910                         /* Reset the default afex VLAN */
2911                         bp->afex_def_vlan_tag = -1;
2912                 }
2913         }
2914 }
2915
2916 static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp)
2917 {
2918         struct bnx2x_func_switch_update_params *switch_update_params;
2919         struct bnx2x_func_state_params func_params;
2920
2921         memset(&func_params, 0, sizeof(struct bnx2x_func_state_params));
2922         switch_update_params = &func_params.params.switch_update;
2923         func_params.f_obj = &bp->func_obj;
2924         func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
2925
2926         if (IS_MF_UFP(bp) || IS_MF_BD(bp)) {
2927                 int func = BP_ABS_FUNC(bp);
2928                 u32 val;
2929
2930                 /* Re-learn the S-tag from shmem */
2931                 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2932                                 FUNC_MF_CFG_E1HOV_TAG_MASK;
2933                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
2934                         bp->mf_ov = val;
2935                 } else {
2936                         BNX2X_ERR("Got an SVID event, but no tag is configured in shmem\n");
2937                         goto fail;
2938                 }
2939
2940                 /* Configure new S-tag in LLH */
2941                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + BP_PORT(bp) * 8,
2942                        bp->mf_ov);
2943
2944                 /* Send a ramrod to notify the FW of the change */
2945                 __set_bit(BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG,
2946                           &switch_update_params->changes);
2947                 switch_update_params->vlan = bp->mf_ov;
2948
2949                 if (bnx2x_func_state_change(bp, &func_params) < 0) {
2950                         BNX2X_ERR("Failed to configure FW of S-tag Change to %02x\n",
2951                                   bp->mf_ov);
2952                         goto fail;
2953                 } else {
2954                         DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n",
2955                            bp->mf_ov);
2956                 }
2957         } else {
2958                 goto fail;
2959         }
2960
2961         bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0);
2962         return;
2963 fail:
2964         bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE, 0);
2965 }
2966
2967 static void bnx2x_pmf_update(struct bnx2x *bp)
2968 {
2969         int port = BP_PORT(bp);
2970         u32 val;
2971
2972         bp->port.pmf = 1;
2973         DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf);
2974
2975         /*
2976          * We need the mb() to ensure the ordering between the writing to
2977          * bp->port.pmf here and reading it from the bnx2x_periodic_task().
2978          */
2979         smp_mb();
2980
2981         /* queue a periodic task */
2982         queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2983
2984         bnx2x_dcbx_pmf_update(bp);
2985
2986         /* enable nig attention */
2987         val = (0xff0f | (1 << (BP_VN(bp) + 4)));
2988         if (bp->common.int_block == INT_BLOCK_HC) {
2989                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2990                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2991         } else if (!CHIP_IS_E1x(bp)) {
2992                 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2993                 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2994         }
2995
2996         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2997 }
2998
2999 /* end of Link */
3000
3001 /* slow path */
3002
3003 /*
3004  * General service functions
3005  */
3006
3007 /* send the MCP a request, block until there is a reply */
3008 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
3009 {
3010         int mb_idx = BP_FW_MB_IDX(bp);
3011         u32 seq;
3012         u32 rc = 0;
3013         u32 cnt = 1;
3014         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
3015
3016         mutex_lock(&bp->fw_mb_mutex);
3017         seq = ++bp->fw_seq;
3018         SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
3019         SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
3020
3021         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n",
3022                         (command | seq), param);
3023
3024         do {
3025                 /* let the FW do its magic ... */
3026                 msleep(delay);
3027
3028                 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
3029
3030                 /* Give the FW up to 5 seconds (500*10ms) */
3031         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
3032
3033         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
3034            cnt*delay, rc, seq);
3035
3036         /* is this a reply to our command? */
3037         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
3038                 rc &= FW_MSG_CODE_MASK;
3039         else {
3040                 /* FW BUG! */
3041                 BNX2X_ERR("FW failed to respond!\n");
3042                 bnx2x_fw_dump(bp);
3043                 rc = 0;
3044         }
3045         mutex_unlock(&bp->fw_mb_mutex);
3046
3047         return rc;
3048 }
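/* A minimal usage sketch, assuming a caller that only cares whether the MCP
 * replied at all (DRV_MSG_CODE_DRV_INFO_ACK is used purely as an example
 * opcode; see the real calls elsewhere in this file):
 *
 *      u32 resp = bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
 *      if (!resp)
 *              BNX2X_ERR("MCP did not reply\n");
 */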
3049
3050 static void storm_memset_func_cfg(struct bnx2x *bp,
3051                                  struct tstorm_eth_function_common_config *tcfg,
3052                                  u16 abs_fid)
3053 {
3054         size_t size = sizeof(struct tstorm_eth_function_common_config);
3055
3056         u32 addr = BAR_TSTRORM_INTMEM +
3057                         TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
3058
3059         __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
3060 }
3061
3062 void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
3063 {
3064         if (CHIP_IS_E1x(bp)) {
3065                 struct tstorm_eth_function_common_config tcfg = {0};
3066
3067                 storm_memset_func_cfg(bp, &tcfg, p->func_id);
3068         }
3069
3070         /* Enable the function in the FW */
3071         storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
3072         storm_memset_func_en(bp, p->func_id, 1);
3073
3074         /* spq */
3075         if (p->spq_active) {
3076                 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
3077                 REG_WR(bp, XSEM_REG_FAST_MEMORY +
3078                        XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
3079         }
3080 }
3081
3082 /**
3083  * bnx2x_get_common_flags - Return common flags
3084  *
3085  * @bp:         device handle
3086  * @fp:         queue handle
3087  * @zero_stats: TRUE if statistics zeroing is needed
3088  *
3089  * Return the flags that are common to the Tx-only and the regular connections.
3090  */
3091 static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
3092                                             struct bnx2x_fastpath *fp,
3093                                             bool zero_stats)
3094 {
3095         unsigned long flags = 0;
3096
3097         /* PF driver will always initialize the Queue to an ACTIVE state */
3098         __set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
3099
3100         /* tx only connections collect statistics (on the same index as the
3101          * parent connection). The statistics are zeroed when the parent
3102          * connection is initialized.
3103          */
3104
3105         __set_bit(BNX2X_Q_FLG_STATS, &flags);
3106         if (zero_stats)
3107                 __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
3108
3109         if (bp->flags & TX_SWITCHING)
3110                 __set_bit(BNX2X_Q_FLG_TX_SWITCH, &flags);
3111
3112         __set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags);
3113         __set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags);
3114
3115 #ifdef BNX2X_STOP_ON_ERROR
3116         __set_bit(BNX2X_Q_FLG_TX_SEC, &flags);
3117 #endif
3118
3119         return flags;
3120 }
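/* Note: Tx-only connections are expected to use just these common flags, while
 * regular connections OR them together with the queue-specific flags computed
 * by bnx2x_get_q_flags() below.
 */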
3121
3122 static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
3123                                        struct bnx2x_fastpath *fp,
3124                                        bool leading)
3125 {
3126         unsigned long flags = 0;
3127
3128         /* calculate other queue flags */
3129         if (IS_MF_SD(bp))
3130                 __set_bit(BNX2X_Q_FLG_OV, &flags);
3131
3132         if (IS_FCOE_FP(fp)) {
3133                 __set_bit(BNX2X_Q_FLG_FCOE, &flags);
3134                 /* For FCoE - force usage of default priority (for afex) */
3135                 __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
3136         }
3137
3138         if (fp->mode != TPA_MODE_DISABLED) {
3139                 __set_bit(BNX2X_Q_FLG_TPA, &flags);
3140                 __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
3141                 if (fp->mode == TPA_MODE_GRO)
3142                         __set_bit(BNX2X_Q_FLG_TPA_GRO, &flags);
3143         }
3144
3145         if (leading) {
3146                 __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags);
3147                 __set_bit(BNX2X_Q_FLG_MCAST, &flags);
3148         }
3149
3150         /* Always set HW VLAN stripping */
3151         __set_bit(BNX2X_Q_FLG_VLAN, &flags);
3152
3153         /* configure silent vlan removal */
3154         if (IS_MF_AFEX(bp))
3155                 __set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags);
3156
3157         return flags | bnx2x_get_common_flags(bp, fp, true);
3158 }
3159
3160 static void bnx2x_pf_q_prep_general(struct bnx2x *bp,
3161         struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init,
3162         u8 cos)
3163 {
3164         gen_init->stat_id = bnx2x_stats_id(fp);
3165         gen_init->spcl_id = fp->cl_id;
3166
3167         /* Always use mini-jumbo MTU for FCoE L2 ring */
3168         if (IS_FCOE_FP(fp))
3169                 gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
3170         else
3171                 gen_init->mtu = bp->dev->mtu;
3172
3173         gen_init->cos = cos;
3174
3175         gen_init->fp_hsi = ETH_FP_HSI_VERSION;
3176 }
3177
3178 static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
3179         struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
3180         struct bnx2x_rxq_setup_params *rxq_init)
3181 {
3182         u8 max_sge = 0;
3183         u16 sge_sz = 0;
3184         u16 tpa_agg_size = 0;
3185
3186         if (fp->mode != TPA_MODE_DISABLED) {
3187                 pause->sge_th_lo = SGE_TH_LO(bp);
3188                 pause->sge_th_hi = SGE_TH_HI(bp);
3189
3190                 /* validate the SGE ring has enough entries to cross the high threshold */
3191                 WARN_ON(bp->dropless_fc &&
3192                                 pause->sge_th_hi + FW_PREFETCH_CNT >
3193                                 MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
3194
3195                 tpa_agg_size = TPA_AGG_SIZE;
3196                 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
3197                         SGE_PAGE_SHIFT;
3198                 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
3199                           (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
3200                 sge_sz = (u16)min_t(u32, SGE_PAGES, 0xffff);
3201         }
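        /* Worked example for the max_sge computation above, assuming 4 KiB SGE
         * pages and one page per SGE entry: for a hypothetical 9000-byte MTU,
         * SGE_PAGE_ALIGN(9000) = 12288, so 12288 >> SGE_PAGE_SHIFT = 3 pages,
         * which is already a whole number of SGE entries, giving max_sge = 3.
         */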
3202
3203         /* pause - not for e1 */
3204         if (!CHIP_IS_E1(bp)) {
3205                 pause->bd_th_lo = BD_TH_LO(bp);
3206                 pause->bd_th_hi = BD_TH_HI(bp);
3207
3208                 pause->rcq_th_lo = RCQ_TH_LO(bp);
3209                 pause->rcq_th_hi = RCQ_TH_HI(bp);
3210                 /*
3211                  * validate that rings have enough entries to cross
3212                  * high thresholds
3213                  */
3214                 WARN_ON(bp->dropless_fc &&
3215                                 pause->bd_th_hi + FW_PREFETCH_CNT >
3216                                 bp->rx_ring_size);
3217                 WARN_ON(bp->dropless_fc &&
3218                                 pause->rcq_th_hi + FW_PREFETCH_CNT >
3219                                 NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);
3220
3221                 pause->pri_map = 1;
3222         }
3223
3224         /* rxq setup */
3225         rxq_init->dscr_map = fp->rx_desc_mapping;
3226         rxq_init->sge_map = fp->rx_sge_mapping;
3227         rxq_init->rcq_map = fp->rx_comp_mapping;
3228         rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
3229
3230         /* This should be the maximum number of data bytes that may be
3231          * placed on the BD (not including padding).
3232          */
3233         rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START -
3234                            BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;
3235
3236         rxq_init->cl_qzone_id = fp->cl_qzone_id;
3237         rxq_init->tpa_agg_sz = tpa_agg_size;
3238         rxq_init->sge_buf_sz = sge_sz;
3239         rxq_init->max_sges_pkt = max_sge;
3240         rxq_init->rss_engine_id = BP_FUNC(bp);
3241         rxq_init->mcast_engine_id = BP_FUNC(bp);
3242
3243         /* Maximum number of simultaneous TPA aggregations for this Queue.
3244          *
3245          * For PF Clients it should be the maximum available number.
3246          * VF driver(s) may want to set it to a smaller value.
3247          */
3248         rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
3249
3250         rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
3251         rxq_init->fw_sb_id = fp->fw_sb_id;
3252
3253         if (IS_FCOE_FP(fp))
3254                 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
3255         else
3256                 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
3257         /* configure silent vlan removal
3258          * if multi function mode is afex, then mask default vlan
3259          */
3260         if (IS_MF_AFEX(bp)) {
3261                 rxq_init->silent_removal_value = bp->afex_def_vlan_tag;
3262                 rxq_init->silent_removal_mask = VLAN_VID_MASK;
3263         }
3264 }
3265
3266 static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
3267         struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
3268         u8 cos)
3269 {
3270         txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping;
3271         txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
3272         txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
3273         txq_init->fw_sb_id = fp->fw_sb_id;
3274
3275         /*
3276          * set the tss leading client id for TX classification ==
3277          * leading RSS client id
3278          */
3279         txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);
3280
3281         if (IS_FCOE_FP(fp)) {
3282                 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
3283                 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
3284         }
3285 }
3286
3287 static void bnx2x_pf_init(struct bnx2x *bp)
3288 {
3289         struct bnx2x_func_init_params func_init = {0};
3290         struct event_ring_data eq_data = { {0} };
3291
3292         if (!CHIP_IS_E1x(bp)) {
3293                 /* reset IGU PF statistics: MSIX + ATTN */
3294                 /* PF */
3295                 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
3296                            BNX2X_IGU_STAS_MSG_VF_CNT*4 +
3297                            (CHIP_MODE_IS_4_PORT(bp) ?
3298                                 BP_FUNC(bp) : BP_VN(bp))*4, 0);
3299                 /* ATTN */
3300                 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
3301                            BNX2X_IGU_STAS_MSG_VF_CNT*4 +
3302                            BNX2X_IGU_STAS_MSG_PF_CNT*4 +
3303                            (CHIP_MODE_IS_4_PORT(bp) ?
3304                                 BP_FUNC(bp) : BP_VN(bp))*4, 0);
3305         }
3306
3307         func_init.spq_active = true;
3308         func_init.pf_id = BP_FUNC(bp);
3309         func_init.func_id = BP_FUNC(bp);
3310         func_init.spq_map = bp->spq_mapping;
3311         func_init.spq_prod = bp->spq_prod_idx;
3312
3313         bnx2x_func_init(bp, &func_init);
3314
3315         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
3316
3317         /*
3318          * Congestion management values depend on the link rate.
3319          * There is no active link yet, so the initial link rate is set to 10 Gbps.
3320          * When the link comes up, the congestion management values are
3321          * re-calculated according to the actual link rate.
3322          */
3323         bp->link_vars.line_speed = SPEED_10000;
3324         bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
3325
3326         /* Only the PMF sets the HW */
3327         if (bp->port.pmf)
3328                 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3329
3330         /* init Event Queue - PCI bus guarantees correct endianness */
3331         eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
3332         eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
3333         eq_data.producer = bp->eq_prod;
3334         eq_data.index_id = HC_SP_INDEX_EQ_CONS;
3335         eq_data.sb_id = DEF_SB_ID;
3336         storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
3337 }
3338
3339 static void bnx2x_e1h_disable(struct bnx2x *bp)
3340 {
3341         int port = BP_PORT(bp);
3342
3343         bnx2x_tx_disable(bp);
3344
3345         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
3346 }
3347
3348 static void bnx2x_e1h_enable(struct bnx2x *bp)
3349 {
3350         int port = BP_PORT(bp);
3351
3352         if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
3353                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
3354
3355         /* Only the Tx queues should be re-enabled here */
3356         netif_tx_wake_all_queues(bp->dev);
3357
3358         /*
3359          * Do not call netif_carrier_on here; it will be called when the link
3360          * state is checked and the link is up.
3361          */
3362 }
3363
3364 #define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
3365
3366 static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
3367 {
3368         struct eth_stats_info *ether_stat =
3369                 &bp->slowpath->drv_info_to_mcp.ether_stat;
3370         struct bnx2x_vlan_mac_obj *mac_obj =
3371                 &bp->sp_objs->mac_obj;
3372         int i;
3373
3374         strlcpy(ether_stat->version, DRV_MODULE_VERSION,
3375                 ETH_STAT_INFO_VERSION_LEN);
3376
3377         /* get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the
3378          * mac_local field in ether_stat struct. The base address is offset by 2
3379          * bytes to account for the field being 8 bytes but a mac address is
3380          * only 6 bytes. Likewise, the stride for the get_n_elements function is
3381          * 2 bytes to compensate from the 6 bytes of a mac to the 8 bytes
3382          * 2 bytes, padding the 6 bytes of a mac out to the 8 bytes
3383          * proper positions.
3384          */
3385         for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++)
3386                 memset(ether_stat->mac_local + i, 0,
3387                        sizeof(ether_stat->mac_local[0]));
3388         mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj,
3389                                 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
3390                                 ether_stat->mac_local + MAC_PAD, MAC_PAD,
3391                                 ETH_ALEN);
3392         ether_stat->mtu_size = bp->dev->mtu;
3393         if (bp->dev->features & NETIF_F_RXCSUM)
3394                 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
3395         if (bp->dev->features & NETIF_F_TSO)
3396                 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
3397         ether_stat->feature_flags |= bp->common.boot_mode;
3398
3399         ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0;
3400
3401         ether_stat->txq_size = bp->tx_ring_size;
3402         ether_stat->rxq_size = bp->rx_ring_size;
3403
3404 #ifdef CONFIG_BNX2X_SRIOV
3405         ether_stat->vf_cnt = IS_SRIOV(bp) ? bp->vfdb->sriov.nr_virtfn : 0;
3406 #endif
3407 }
3408
3409 static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
3410 {
3411         struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3412         struct fcoe_stats_info *fcoe_stat =
3413                 &bp->slowpath->drv_info_to_mcp.fcoe_stat;
3414
3415         if (!CNIC_LOADED(bp))
3416                 return;
3417
3418         memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN);
3419
3420         fcoe_stat->qos_priority =
3421                 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
3422
3423         /* insert FCoE stats from ramrod response */
3424         if (!NO_FCOE(bp)) {
3425                 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
3426                         &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3427                         tstorm_queue_statistics;
3428
3429                 struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
3430                         &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3431                         xstorm_queue_statistics;
3432
3433                 struct fcoe_statistics_params *fw_fcoe_stat =
3434                         &bp->fw_stats_data->fcoe;
3435
3436                 ADD_64_LE(fcoe_stat->rx_bytes_hi, LE32_0,
3437                           fcoe_stat->rx_bytes_lo,
3438                           fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
3439
3440                 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3441                           fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
3442                           fcoe_stat->rx_bytes_lo,
3443                           fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
3444
3445                 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3446                           fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
3447                           fcoe_stat->rx_bytes_lo,
3448                           fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
3449
3450                 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3451                           fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
3452                           fcoe_stat->rx_bytes_lo,
3453                           fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
3454
3455                 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3456                           fcoe_stat->rx_frames_lo,
3457                           fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
3458
3459                 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3460                           fcoe_stat->rx_frames_lo,
3461                           fcoe_q_tstorm_stats->rcv_ucast_pkts);
3462
3463                 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3464                           fcoe_stat->rx_frames_lo,
3465                           fcoe_q_tstorm_stats->rcv_bcast_pkts);
3466
3467                 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3468                           fcoe_stat->rx_frames_lo,
3469                           fcoe_q_tstorm_stats->rcv_mcast_pkts);
3470
3471                 ADD_64_LE(fcoe_stat->tx_bytes_hi, LE32_0,
3472                           fcoe_stat->tx_bytes_lo,
3473                           fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
3474
3475                 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3476                           fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
3477                           fcoe_stat->tx_bytes_lo,
3478                           fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
3479
3480                 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3481                           fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
3482                           fcoe_stat->tx_bytes_lo,
3483                           fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
3484
3485                 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3486                           fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
3487                           fcoe_stat->tx_bytes_lo,
3488                           fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
3489
3490                 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3491                           fcoe_stat->tx_frames_lo,
3492                           fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
3493
3494                 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3495                           fcoe_stat->tx_frames_lo,
3496                           fcoe_q_xstorm_stats->ucast_pkts_sent);
3497
3498                 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3499                           fcoe_stat->tx_frames_lo,
3500                           fcoe_q_xstorm_stats->bcast_pkts_sent);
3501
3502                 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3503                           fcoe_stat->tx_frames_lo,
3504                           fcoe_q_xstorm_stats->mcast_pkts_sent);
3505         }
3506
3507         /* ask L5 driver to add data to the struct */
3508         bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
3509 }
3510
3511 static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
3512 {
3513         struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3514         struct iscsi_stats_info *iscsi_stat =
3515                 &bp->slowpath->drv_info_to_mcp.iscsi_stat;
3516
3517         if (!CNIC_LOADED(bp))
3518                 return;
3519
3520         memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac,
3521                ETH_ALEN);
3522
3523         iscsi_stat->qos_priority =
3524                 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
3525
3526         /* ask L5 driver to add data to the struct */
3527         bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
3528 }
3529
3530 /* called due to MCP event (on pmf):
3531  *      reread new bandwidth configuration
3532  *      configure FW
3533  *      notify other functions about the change
3534  */
3535 static void bnx2x_config_mf_bw(struct bnx2x *bp)
3536 {
3537         if (bp->link_vars.link_up) {
3538                 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
3539                 bnx2x_link_sync_notify(bp);
3540         }
3541         storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3542 }
3543
3544 static void bnx2x_set_mf_bw(struct bnx2x *bp)
3545 {
3546         bnx2x_config_mf_bw(bp);
3547         bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
3548 }
3549
3550 static void bnx2x_handle_eee_event(struct bnx2x *bp)
3551 {
3552         DP(BNX2X_MSG_MCP, "EEE - LLDP event\n");
3553         bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
3554 }
3555
3556 #define BNX2X_UPDATE_DRV_INFO_IND_LENGTH        (20)
3557 #define BNX2X_UPDATE_DRV_INFO_IND_COUNT         (25)
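/* i.e. the driver polls for at most 25 * 20 ms = roughly 500 ms for management
 * to release the drv_info buffer in bnx2x_handle_drv_info_req() below.
 */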
3558
3559 static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
3560 {
3561         enum drv_info_opcode op_code;
3562         u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
3563         bool release = false;
3564         int wait;
3565
3566         /* if drv_info version supported by MFW doesn't match - send NACK */
3567         if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
3568                 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3569                 return;
3570         }
3571
3572         op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
3573                   DRV_INFO_CONTROL_OP_CODE_SHIFT;
3574
3575         /* Must prevent other flows from accessing drv_info_to_mcp */
3576         mutex_lock(&bp->drv_info_mutex);
3577
3578         memset(&bp->slowpath->drv_info_to_mcp, 0,
3579                sizeof(union drv_info_to_mcp));
3580
3581         switch (op_code) {
3582         case ETH_STATS_OPCODE:
3583                 bnx2x_drv_info_ether_stat(bp);
3584                 break;
3585         case FCOE_STATS_OPCODE:
3586                 bnx2x_drv_info_fcoe_stat(bp);
3587                 break;
3588         case ISCSI_STATS_OPCODE:
3589                 bnx2x_drv_info_iscsi_stat(bp);
3590                 break;
3591         default:
3592                 /* if op code isn't supported - send NACK */
3593                 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3594                 goto out;
3595         }
3596
3597         /* if we got drv_info attn from MFW then these fields are defined in
3598          * shmem2 for sure
3599          */
3600         SHMEM2_WR(bp, drv_info_host_addr_lo,
3601                 U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3602         SHMEM2_WR(bp, drv_info_host_addr_hi,
3603                 U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3604
3605         bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
3606
3607         /* Since management may want both this and get_driver_version, we
3608          * need to wait until management notifies us that it has finished
3609          * using the buffer.
3610          */
3611         if (!SHMEM2_HAS(bp, mfw_drv_indication)) {
3612                 DP(BNX2X_MSG_MCP, "Management does not support indication\n");
3613         } else if (!bp->drv_info_mng_owner) {
3614                 u32 bit = MFW_DRV_IND_READ_DONE_OFFSET((BP_ABS_FUNC(bp) >> 1));
3615
3616                 for (wait = 0; wait < BNX2X_UPDATE_DRV_INFO_IND_COUNT; wait++) {
3617                         u32 indication = SHMEM2_RD(bp, mfw_drv_indication);
3618
3619                         /* Management is done; need to clear indication */
3620                         if (indication & bit) {
3621                                 SHMEM2_WR(bp, mfw_drv_indication,
3622                                           indication & ~bit);
3623                                 release = true;
3624                                 break;
3625                         }
3626
3627                         msleep(BNX2X_UPDATE_DRV_INFO_IND_LENGTH);
3628                 }
3629         }
3630         if (!release) {
3631                 DP(BNX2X_MSG_MCP, "Management did not release indication\n");
3632                 bp->drv_info_mng_owner = true;
3633         }
3634
3635 out:
3636         mutex_unlock(&bp->drv_info_mutex);
3637 }
3638
3639 static u32 bnx2x_update_mng_version_utility(u8 *version, bool bnx2x_format)
3640 {
3641         u8 vals[4];
3642         int i = 0;
3643
3644         if (bnx2x_format) {
3645                 i = sscanf(version, "1.%c%hhd.%hhd.%hhd",
3646                            &vals[0], &vals[1], &vals[2], &vals[3]);
3647                 if (i > 0)
3648                         vals[0] -= '0';
3649         } else {
3650                 i = sscanf(version, "%hhd.%hhd.%hhd.%hhd",
3651                            &vals[0], &vals[1], &vals[2], &vals[3]);
3652         }
3653
3654         while (i < 4)
3655                 vals[i++] = 0;
3656
3657         return (vals[0] << 24) | (vals[1] << 16) | (vals[2] << 8) | vals[3];
3658 }
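/* Worked example for the utility above, with a hypothetical bnx2x-format
 * string "1.712.30": sscanf() matches '7', 12 and 30, the leading character is
 * converted from ASCII ('7' - '0' = 7), the missing fourth field is
 * zero-filled, and the packed result is
 * (7 << 24) | (12 << 16) | (30 << 8) | 0 = 0x070c1e00.
 */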
3659
3660 void bnx2x_update_mng_version(struct bnx2x *bp)
3661 {
3662         u32 iscsiver = DRV_VER_NOT_LOADED;
3663         u32 fcoever = DRV_VER_NOT_LOADED;
3664         u32 ethver = DRV_VER_NOT_LOADED;
3665         int idx = BP_FW_MB_IDX(bp);
3666         u8 *version;
3667
3668         if (!SHMEM2_HAS(bp, func_os_drv_ver))
3669                 return;
3670
3671         mutex_lock(&bp->drv_info_mutex);
3672         /* Must not proceed while `bnx2x_handle_drv_info_req' may still be using the buffer */
3673         if (bp->drv_info_mng_owner)
3674                 goto out;
3675
3676         if (bp->state != BNX2X_STATE_OPEN)
3677                 goto out;
3678
3679         /* Parse ethernet driver version */
3680         ethver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
3681         if (!CNIC_LOADED(bp))
3682                 goto out;
3683
3684         /* Try getting storage driver version via cnic */
3685         memset(&bp->slowpath->drv_info_to_mcp, 0,
3686                sizeof(union drv_info_to_mcp));
3687         bnx2x_drv_info_iscsi_stat(bp);
3688         version = bp->slowpath->drv_info_to_mcp.iscsi_stat.version;
3689         iscsiver = bnx2x_update_mng_version_utility(version, false);
3690
3691         memset(&bp->slowpath->drv_info_to_mcp, 0,
3692                sizeof(union drv_info_to_mcp));
3693         bnx2x_drv_info_fcoe_stat(bp);
3694         version = bp->slowpath->drv_info_to_mcp.fcoe_stat.version;
3695         fcoever = bnx2x_update_mng_version_utility(version, false);
3696
3697 out:
3698         SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ETHERNET], ethver);
3699         SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ISCSI], iscsiver);
3700         SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_FCOE], fcoever);
3701
3702         mutex_unlock(&bp->drv_info_mutex);
3703
3704         DP(BNX2X_MSG_MCP, "Setting driver version: ETH [%08x] iSCSI [%08x] FCoE [%08x]\n",
3705            ethver, iscsiver, fcoever);
3706 }
3707
3708 void bnx2x_update_mfw_dump(struct bnx2x *bp)
3709 {
3710         u32 drv_ver;
3711         u32 valid_dump;
3712
3713         if (!SHMEM2_HAS(bp, drv_info))
3714                 return;
3715
3716         /* Update Driver load time, possibly broken in y2038 */
3717         SHMEM2_WR(bp, drv_info.epoc, (u32)ktime_get_real_seconds());
3718
3719         drv_ver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
3720         SHMEM2_WR(bp, drv_info.drv_ver, drv_ver);
3721
3722         SHMEM2_WR(bp, drv_info.fw_ver, REG_RD(bp, XSEM_REG_PRAM));
3723
3724         /* Check & notify On-Chip dump. */
3725         valid_dump = SHMEM2_RD(bp, drv_info.valid_dump);
3726
3727         if (valid_dump & FIRST_DUMP_VALID)
3728                 DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 1st partition\n");
3729
3730         if (valid_dump & SECOND_DUMP_VALID)
3731                 DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 2nd partition\n");
3732 }
3733
3734 static void bnx2x_oem_event(struct bnx2x *bp, u32 event)
3735 {
3736         u32 cmd_ok, cmd_fail;
3737
3738         /* sanity */
3739         if (event & DRV_STATUS_DCC_EVENT_MASK &&
3740             event & DRV_STATUS_OEM_EVENT_MASK) {
3741                 BNX2X_ERR("Received simultaneous events %08x\n", event);
3742                 return;
3743         }
3744
3745         if (event & DRV_STATUS_DCC_EVENT_MASK) {
3746                 cmd_fail = DRV_MSG_CODE_DCC_FAILURE;
3747                 cmd_ok = DRV_MSG_CODE_DCC_OK;
3748         } else /* if (event & DRV_STATUS_OEM_EVENT_MASK) */ {
3749                 cmd_fail = DRV_MSG_CODE_OEM_FAILURE;
3750                 cmd_ok = DRV_MSG_CODE_OEM_OK;
3751         }
3752
3753         DP(BNX2X_MSG_MCP, "oem_event 0x%x\n", event);
3754
3755         if (event & (DRV_STATUS_DCC_DISABLE_ENABLE_PF |
3756                      DRV_STATUS_OEM_DISABLE_ENABLE_PF)) {
3757                 /* This is the only place besides the function initialization
3758                  * where bp->flags can change, so it is done without any
3759                  * locks.
3760                  */
3761                 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
3762                         DP(BNX2X_MSG_MCP, "mf_cfg function disabled\n");
3763                         bp->flags |= MF_FUNC_DIS;
3764
3765                         bnx2x_e1h_disable(bp);
3766                 } else {
3767                         DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n");
3768                         bp->flags &= ~MF_FUNC_DIS;
3769
3770                         bnx2x_e1h_enable(bp);
3771                 }
3772                 event &= ~(DRV_STATUS_DCC_DISABLE_ENABLE_PF |
3773                            DRV_STATUS_OEM_DISABLE_ENABLE_PF);
3774         }
3775
3776         if (event & (DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
3777                      DRV_STATUS_OEM_BANDWIDTH_ALLOCATION)) {
3778                 bnx2x_config_mf_bw(bp);
3779                 event &= ~(DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
3780                            DRV_STATUS_OEM_BANDWIDTH_ALLOCATION);
3781         }
3782
3783         /* Report results to MCP */
3784         if (event)
3785                 bnx2x_fw_command(bp, cmd_fail, 0);
3786         else
3787                 bnx2x_fw_command(bp, cmd_ok, 0);
3788 }
3789
3790 /* must be called under the spq lock */
3791 static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
3792 {
3793         struct eth_spe *next_spe = bp->spq_prod_bd;
3794
3795         if (bp->spq_prod_bd == bp->spq_last_bd) {
3796                 bp->spq_prod_bd = bp->spq;
3797                 bp->spq_prod_idx = 0;
3798                 DP(BNX2X_MSG_SP, "end of spq\n");
3799         } else {
3800                 bp->spq_prod_bd++;
3801                 bp->spq_prod_idx++;
3802         }
3803         return next_spe;
3804 }
3805
3806 /* must be called under the spq lock */
3807 static void bnx2x_sp_prod_update(struct bnx2x *bp)
3808 {
3809         int func = BP_FUNC(bp);
3810
3811         /*
3812          * Make sure that BD data is updated before writing the producer:
3813          * BD data is written to the memory, the producer is read from the
3814          * memory, thus we need a full memory barrier to ensure the ordering.
3815          */
3816         mb();
3817
3818         REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
3819                  bp->spq_prod_idx);
3820         mmiowb();
3821 }
3822
3823 /**
3824  * bnx2x_is_contextless_ramrod - check if the current command ends on EQ
3825  *
3826  * @cmd:        command to check
3827  * @cmd_type:   command type
3828  */
3829 static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
3830 {
3831         if ((cmd_type == NONE_CONNECTION_TYPE) ||
3832             (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
3833             (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
3834             (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
3835             (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
3836             (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
3837             (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE))
3838                 return true;
3839         else
3840                 return false;
3841 }
3842
3843 /**
3844  * bnx2x_sp_post - place a single command on an SP ring
3845  *
3846  * @bp:         driver handle
3847  * @command:    command to place (e.g. SETUP, FILTER_RULES, etc.)
3848  * @cid:        SW CID the command is related to
3849  * @data_hi:    command private data address (high 32 bits)
3850  * @data_lo:    command private data address (low 32 bits)
3851  * @cmd_type:   command type (e.g. NONE, ETH)
3852  *
3853  * SP data is handled as if it's always an address pair, thus data fields are
3854  * not swapped to little endian in upper functions. Instead this function swaps
3855  * data as if it's two u32 fields.
3856  */
3857 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
3858                   u32 data_hi, u32 data_lo, int cmd_type)
3859 {
3860         struct eth_spe *spe;
3861         u16 type;
3862         bool common = bnx2x_is_contextless_ramrod(command, cmd_type);
3863
3864 #ifdef BNX2X_STOP_ON_ERROR
3865         if (unlikely(bp->panic)) {
3866                 BNX2X_ERR("Can't post SP when there is panic\n");
3867                 return -EIO;
3868         }
3869 #endif
3870
3871         spin_lock_bh(&bp->spq_lock);
3872
3873         if (common) {
3874                 if (!atomic_read(&bp->eq_spq_left)) {
3875                         BNX2X_ERR("BUG! EQ ring full!\n");
3876                         spin_unlock_bh(&bp->spq_lock);
3877                         bnx2x_panic();
3878                         return -EBUSY;
3879                 }
3880         } else if (!atomic_read(&bp->cq_spq_left)) {
3881                         BNX2X_ERR("BUG! SPQ ring full!\n");
3882                         spin_unlock_bh(&bp->spq_lock);
3883                         bnx2x_panic();
3884                         return -EBUSY;
3885         }
3886
3887         spe = bnx2x_sp_get_next(bp);
3888
3889         /* The CID needs the port number encoded in it */
3890         spe->hdr.conn_and_cmd_data =
3891                         cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
3892                                     HW_CID(bp, cid));
3893
3894         /* In some cases, type may already contain the func-id,
3895          * mainly in SRIOV-related use cases, so we add it here only
3896          * if it is not already set.
3897          */
3898         if (!(cmd_type & SPE_HDR_FUNCTION_ID)) {
3899                 type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) &
3900                         SPE_HDR_CONN_TYPE;
3901                 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
3902                          SPE_HDR_FUNCTION_ID);
3903         } else {
3904                 type = cmd_type;
3905         }
3906
3907         spe->hdr.type = cpu_to_le16(type);
3908
3909         spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
3910         spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
3911
3912         /*
3913          * It's ok if the actual decrement is issued towards the memory
3914          * somewhere between the spin_lock and spin_unlock. Thus no
3915          * further explicit memory barrier is needed.
3916          */
3917         if (common)
3918                 atomic_dec(&bp->eq_spq_left);
3919         else
3920                 atomic_dec(&bp->cq_spq_left);
3921
3922         DP(BNX2X_MSG_SP,
3923            "SPQE[%x] (%x:%x)  (cmd, common?) (%d,%d)  hw_cid %x  data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n",
3924            bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
3925            (u32)(U64_LO(bp->spq_mapping) +
3926            (void *)bp->spq_prod_bd - (void *)bp->spq), command, common,
3927            HW_CID(bp, cid), data_hi, data_lo, type,
3928            atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
3929
3930         bnx2x_sp_prod_update(bp);
3931         spin_unlock_bh(&bp->spq_lock);
3932         return 0;
3933 }
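/* A minimal usage sketch, with hypothetical local variable names; a caller
 * typically passes the DMA address of its ramrod data split into two 32-bit
 * halves:
 *
 *      rc = bnx2x_sp_post(bp, ramrod_cmd, cid,
 *                         U64_HI(rdata_mapping), U64_LO(rdata_mapping),
 *                         ETH_CONNECTION_TYPE);
 *
 * Commands matched by bnx2x_is_contextless_ramrod() (including anything posted
 * with NONE_CONNECTION_TYPE) are accounted against the EQ budget; all other
 * commands consume the CQ (SPQ) budget.
 */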
3934
3935 /* acquire split MCP access lock register */
3936 static int bnx2x_acquire_alr(struct bnx2x *bp)
3937 {
3938         u32 j, val;
3939         int rc = 0;
3940
3941         might_sleep();
3942         for (j = 0; j < 1000; j++) {
3943                 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, MCPR_ACCESS_LOCK_LOCK);
3944                 val = REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK);
3945                 if (val & MCPR_ACCESS_LOCK_LOCK)
3946                         break;
3947
3948                 usleep_range(5000, 10000);
3949         }
3950         if (!(val & MCPR_ACCESS_LOCK_LOCK)) {
3951                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
3952                 rc = -EBUSY;
3953         }
3954
3955         return rc;
3956 }
3957
3958 /* release split MCP access lock register */
3959 static void bnx2x_release_alr(struct bnx2x *bp)
3960 {
3961         REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0);
3962 }
3963
3964 #define BNX2X_DEF_SB_ATT_IDX    0x0001
3965 #define BNX2X_DEF_SB_IDX        0x0002
3966
3967 static u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
3968 {
3969         struct host_sp_status_block *def_sb = bp->def_status_blk;
3970         u16 rc = 0;
3971
3972         barrier(); /* status block is written to by the chip */
3973         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
3974                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
3975                 rc |= BNX2X_DEF_SB_ATT_IDX;
3976         }
3977
3978         if (bp->def_idx != def_sb->sp_sb.running_index) {
3979                 bp->def_idx = def_sb->sp_sb.running_index;
3980                 rc |= BNX2X_DEF_SB_IDX;
3981         }
3982
3983         /* Do not reorder: indices reading should complete before handling */
3984         barrier();
3985         return rc;
3986 }
3987
3988 /*
3989  * slow path service functions
3990  */
3991
3992 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
3993 {
3994         int port = BP_PORT(bp);
3995         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3996                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
3997         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
3998                                        NIG_REG_MASK_INTERRUPT_PORT0;
3999         u32 aeu_mask;
4000         u32 nig_mask = 0;
4001         u32 reg_addr;
4002
4003         if (bp->attn_state & asserted)
4004                 BNX2X_ERR("IGU ERROR\n");
4005
4006         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
4007         aeu_mask = REG_RD(bp, aeu_addr);
4008
4009         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
4010            aeu_mask, asserted);
4011         aeu_mask &= ~(asserted & 0x3ff);
4012         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
4013
4014         REG_WR(bp, aeu_addr, aeu_mask);
4015         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
4016
4017         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
4018         bp->attn_state |= asserted;
4019         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
4020
4021         if (asserted & ATTN_HARD_WIRED_MASK) {
4022                 if (asserted & ATTN_NIG_FOR_FUNC) {
4023
4024                         bnx2x_acquire_phy_lock(bp);
4025
4026                         /* save nig interrupt mask */
4027                         nig_mask = REG_RD(bp, nig_int_mask_addr);
4028
4029                         /* If nig_mask is not set, no need to call the update
4030                          * function.
4031                          */
4032                         if (nig_mask) {
4033                                 REG_WR(bp, nig_int_mask_addr, 0);
4034
4035                                 bnx2x_link_attn(bp);
4036                         }
4037
4038                         /* handle unicore attn? */
4039                 }
4040                 if (asserted & ATTN_SW_TIMER_4_FUNC)
4041                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
4042
4043                 if (asserted & GPIO_2_FUNC)
4044                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
4045
4046                 if (asserted & GPIO_3_FUNC)
4047                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
4048
4049                 if (asserted & GPIO_4_FUNC)
4050                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
4051
4052                 if (port == 0) {
4053                         if (asserted & ATTN_GENERAL_ATTN_1) {
4054                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
4055                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
4056                         }
4057                         if (asserted & ATTN_GENERAL_ATTN_2) {
4058                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
4059                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
4060                         }
4061                         if (asserted & ATTN_GENERAL_ATTN_3) {
4062                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
4063                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
4064                         }
4065                 } else {
4066                         if (asserted & ATTN_GENERAL_ATTN_4) {
4067                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
4068                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
4069                         }
4070                         if (asserted & ATTN_GENERAL_ATTN_5) {
4071                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
4072                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
4073                         }
4074                         if (asserted & ATTN_GENERAL_ATTN_6) {
4075                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
4076                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
4077                         }
4078                 }
4079
4080         } /* if hardwired */
4081
4082         if (bp->common.int_block == INT_BLOCK_HC)
4083                 reg_addr = (HC_REG_COMMAND_REG + port*32 +
4084                             COMMAND_REG_ATTN_BITS_SET);
4085         else
4086                 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
4087
4088         DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
4089            (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
4090         REG_WR(bp, reg_addr, asserted);
4091
4092         /* now set back the mask */
4093         if (asserted & ATTN_NIG_FOR_FUNC) {
4094                 /* Verify that IGU ack through BAR was written before restoring
4095                  * NIG mask. This loop should exit after 2-3 iterations max.
4096                  */
4097                 if (bp->common.int_block != INT_BLOCK_HC) {
4098                         u32 cnt = 0, igu_acked;
4099                         do {
4100                                 igu_acked = REG_RD(bp,
4101                                                    IGU_REG_ATTENTION_ACK_BITS);
4102                         } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
4103                                  (++cnt < MAX_IGU_ATTN_ACK_TO));
4104                         if (!igu_acked)
4105                                 DP(NETIF_MSG_HW,
4106                                    "Failed to verify IGU ack on time\n");
4107                         barrier();
4108                 }
4109                 REG_WR(bp, nig_int_mask_addr, nig_mask);
4110                 bnx2x_release_phy_lock(bp);
4111         }
4112 }
4113
4114 static void bnx2x_fan_failure(struct bnx2x *bp)
4115 {
4116         int port = BP_PORT(bp);
4117         u32 ext_phy_config;
4118         /* mark the failure */
4119         ext_phy_config =
4120                 SHMEM_RD(bp,
4121                          dev_info.port_hw_config[port].external_phy_config);
4122
4123         ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
4124         ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
4125         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
4126                  ext_phy_config);
4127
4128         /* log the failure */
4129         netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
4130                             "Please contact OEM Support for assistance\n");
4131
4132         /* Schedule device reset (unload).
4133          * Some boards consume enough power while the driver is up to
4134          * overheat if the fan fails.
4135          */
4136         bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_FAN_FAILURE, 0);
4137 }
4138
4139 static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
4140 {
4141         int port = BP_PORT(bp);
4142         int reg_offset;
4143         u32 val;
4144
4145         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4146                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4147
4148         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
4149
4150                 val = REG_RD(bp, reg_offset);
4151                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
4152                 REG_WR(bp, reg_offset, val);
4153
4154                 BNX2X_ERR("SPIO5 hw attention\n");
4155
4156                 /* Fan failure attention */
4157                 bnx2x_hw_reset_phy(&bp->link_params);
4158                 bnx2x_fan_failure(bp);
4159         }
4160
4161         if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) {
4162                 bnx2x_acquire_phy_lock(bp);
4163                 bnx2x_handle_module_detect_int(&bp->link_params);
4164                 bnx2x_release_phy_lock(bp);
4165         }
4166
4167         if (attn & HW_INTERRUT_ASSERT_SET_0) {
4168
4169                 val = REG_RD(bp, reg_offset);
4170                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
4171                 REG_WR(bp, reg_offset, val);
4172
4173                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
4174                           (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
4175                 bnx2x_panic();
4176         }
4177 }
4178
4179 static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
4180 {
4181         u32 val;
4182
4183         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
4184
4185                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
4186                 BNX2X_ERR("DB hw attention 0x%x\n", val);
4187                 /* DORQ discard attention */
4188                 if (val & 0x2)
4189                         BNX2X_ERR("FATAL error from DORQ\n");
4190         }
4191
4192         if (attn & HW_INTERRUT_ASSERT_SET_1) {
4193
4194                 int port = BP_PORT(bp);
4195                 int reg_offset;
4196
4197                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
4198                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
4199
4200                 val = REG_RD(bp, reg_offset);
4201                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
4202                 REG_WR(bp, reg_offset, val);
4203
4204                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
4205                           (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
4206                 bnx2x_panic();
4207         }
4208 }
4209
4210 static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
4211 {
4212         u32 val;
4213
4214         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
4215
4216                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
4217                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
4218                 /* CFC error attention */
4219                 if (val & 0x2)
4220                         BNX2X_ERR("FATAL error from CFC\n");
4221         }
4222
4223         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
4224                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
4225                 BNX2X_ERR("PXP hw attention-0 0x%x\n", val);
4226                 /* RQ_USDMDP_FIFO_OVERFLOW */
4227                 if (val & 0x18000)
4228                         BNX2X_ERR("FATAL error from PXP\n");
4229
4230                 if (!CHIP_IS_E1x(bp)) {
4231                         val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
4232                         BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
4233                 }
4234         }
4235
4236         if (attn & HW_INTERRUT_ASSERT_SET_2) {
4237
4238                 int port = BP_PORT(bp);
4239                 int reg_offset;
4240
4241                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
4242                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
4243
4244                 val = REG_RD(bp, reg_offset);
4245                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
4246                 REG_WR(bp, reg_offset, val);
4247
4248                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
4249                           (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
4250                 bnx2x_panic();
4251         }
4252 }
4253
4254 static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
4255 {
4256         u32 val;
4257
4258         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
4259
4260                 if (attn & BNX2X_PMF_LINK_ASSERT) {
4261                         int func = BP_FUNC(bp);
4262
4263                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
4264                         bnx2x_read_mf_cfg(bp);
4265                         bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
4266                                         func_mf_config[BP_ABS_FUNC(bp)].config);
4267                         val = SHMEM_RD(bp,
4268                                        func_mb[BP_FW_MB_IDX(bp)].drv_status);
4269
4270                         if (val & (DRV_STATUS_DCC_EVENT_MASK |
4271                                    DRV_STATUS_OEM_EVENT_MASK))
4272                                 bnx2x_oem_event(bp,
4273                                         (val & (DRV_STATUS_DCC_EVENT_MASK |
4274                                                 DRV_STATUS_OEM_EVENT_MASK)));
4275
4276                         if (val & DRV_STATUS_SET_MF_BW)
4277                                 bnx2x_set_mf_bw(bp);
4278
4279                         if (val & DRV_STATUS_DRV_INFO_REQ)
4280                                 bnx2x_handle_drv_info_req(bp);
4281
4282                         if (val & DRV_STATUS_VF_DISABLED)
4283                                 bnx2x_schedule_iov_task(bp,
4284                                                         BNX2X_IOV_HANDLE_FLR);
4285
4286                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
4287                                 bnx2x_pmf_update(bp);
4288
4289                         if (bp->port.pmf &&
4290                             (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
4291                                 bp->dcbx_enabled > 0)
4292                                 /* start dcbx state machine */
4293                                 bnx2x_dcbx_set_params(bp,
4294                                         BNX2X_DCBX_STATE_NEG_RECEIVED);
4295                         if (val & DRV_STATUS_AFEX_EVENT_MASK)
4296                                 bnx2x_handle_afex_cmd(bp,
4297                                         val & DRV_STATUS_AFEX_EVENT_MASK);
4298                         if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
4299                                 bnx2x_handle_eee_event(bp);
4300
4301                         if (val & DRV_STATUS_OEM_UPDATE_SVID)
4302                                 bnx2x_handle_update_svid_cmd(bp);
4303
4304                         if (bp->link_vars.periodic_flags &
4305                             PERIODIC_FLAGS_LINK_EVENT) {
4306                                 /*  sync with link */
4307                                 bnx2x_acquire_phy_lock(bp);
4308                                 bp->link_vars.periodic_flags &=
4309                                         ~PERIODIC_FLAGS_LINK_EVENT;
4310                                 bnx2x_release_phy_lock(bp);
4311                                 if (IS_MF(bp))
4312                                         bnx2x_link_sync_notify(bp);
4313                                 bnx2x_link_report(bp);
4314                         }
4315                         /* Always call it here: bnx2x_link_report() will
4316                          * prevent duplicate link indications.
4317                          */
4318                         bnx2x__link_status_update(bp);
4319                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
4320
4321                         BNX2X_ERR("MC assert!\n");
4322                         bnx2x_mc_assert(bp);
4323                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
4324                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
4325                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
4326                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
4327                         bnx2x_panic();
4328
4329                 } else if (attn & BNX2X_MCP_ASSERT) {
4330
4331                         BNX2X_ERR("MCP assert!\n");
4332                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
4333                         bnx2x_fw_dump(bp);
4334
4335                 } else
4336                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
4337         }
4338
4339         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
4340                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
4341                 if (attn & BNX2X_GRC_TIMEOUT) {
4342                         val = CHIP_IS_E1(bp) ? 0 :
4343                                         REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
4344                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
4345                 }
4346                 if (attn & BNX2X_GRC_RSV) {
4347                         val = CHIP_IS_E1(bp) ? 0 :
4348                                         REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
4349                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
4350                 }
4351                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
4352         }
4353 }
4354
4355 /*
4356  * Bits map:
4357  * 0-7   - Engine0 load counter.
4358  * 8-15  - Engine1 load counter.
4359  * 16    - Engine0 RESET_IN_PROGRESS bit.
4360  * 17    - Engine1 RESET_IN_PROGRESS bit.
4361  * 18    - Engine0 ONE_IS_LOADED. Set when there is at least one active function
4362  *         on the engine
4363  * 19    - Engine1 ONE_IS_LOADED.
4364  * 20    - Chip reset flow bit. When set, a non-leader must wait for both
4365  *         engines' leaders to complete (check both RESET_IN_PROGRESS bits,
4366  *         not just the one belonging to its engine).
4367  *
4368  */
4369 #define BNX2X_RECOVERY_GLOB_REG         MISC_REG_GENERIC_POR_1
4370
4371 #define BNX2X_PATH0_LOAD_CNT_MASK       0x000000ff
4372 #define BNX2X_PATH0_LOAD_CNT_SHIFT      0
4373 #define BNX2X_PATH1_LOAD_CNT_MASK       0x0000ff00
4374 #define BNX2X_PATH1_LOAD_CNT_SHIFT      8
4375 #define BNX2X_PATH0_RST_IN_PROG_BIT     0x00010000
4376 #define BNX2X_PATH1_RST_IN_PROG_BIT     0x00020000
4377 #define BNX2X_GLOBAL_RESET_BIT          0x00040000
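/* Illustration (editor's sketch, not driver code): decoding the recovery
 * register read from BNX2X_RECOVERY_GLOB_REG.  Despite the "counter" name,
 * each path's load field is a bitmap with one bit per PF, as
 * bnx2x_set_pf_load()/bnx2x_clear_pf_load() below show:
 *
 *     u32 val  = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
 *     u32 pfs0 = (val & BNX2X_PATH0_LOAD_CNT_MASK) >> BNX2X_PATH0_LOAD_CNT_SHIFT;
 *     bool pf2_loaded     = pfs0 & (1 << 2);          (PF 2 loaded on engine 0)
 *     bool path1_in_reset = val & BNX2X_PATH1_RST_IN_PROG_BIT;
 *     bool global_reset   = val & BNX2X_GLOBAL_RESET_BIT;
 */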
4378
4379 /*
4380  * Set the GLOBAL_RESET bit.
4381  *
4382  * Should be run under rtnl lock
4383  */
4384 void bnx2x_set_reset_global(struct bnx2x *bp)
4385 {
4386         u32 val;
4387         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4388         val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4389         REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
4390         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4391 }
4392
4393 /*
4394  * Clear the GLOBAL_RESET bit.
4395  *
4396  * Should be run under rtnl lock
4397  */
4398 static void bnx2x_clear_reset_global(struct bnx2x *bp)
4399 {
4400         u32 val;
4401         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4402         val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4403         REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
4404         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4405 }
4406
4407 /*
4408  * Checks the GLOBAL_RESET bit.
4409  *
4410  * should be run under rtnl lock
4411  */
4412 static bool bnx2x_reset_is_global(struct bnx2x *bp)
4413 {
4414         u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4415
4416         DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
4417         return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false;
4418 }
4419
4420 /*
4421  * Clear RESET_IN_PROGRESS bit for the current engine.
4422  *
4423  * Should be run under rtnl lock
4424  */
4425 static void bnx2x_set_reset_done(struct bnx2x *bp)
4426 {
4427         u32 val;
4428         u32 bit = BP_PATH(bp) ?
4429                 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4430         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4431         val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4432
4433         /* Clear the bit */
4434         val &= ~bit;
4435         REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4436
4437         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4438 }
4439
4440 /*
4441  * Set RESET_IN_PROGRESS for the current engine.
4442  *
4443  * should be run under rtnl lock
4444  */
4445 void bnx2x_set_reset_in_progress(struct bnx2x *bp)
4446 {
4447         u32 val;
4448         u32 bit = BP_PATH(bp) ?
4449                 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4450         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4451         val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4452
4453         /* Set the bit */
4454         val |= bit;
4455         REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4456         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4457 }
4458
4459 /*
4460  * Checks the RESET_IN_PROGRESS bit for the given engine.
4461  * should be run under rtnl lock
4462  */
4463 bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
4464 {
4465         u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4466         u32 bit = engine ?
4467                 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4468
4469         /* return false if bit is set */
4470         return (val & bit) ? false : true;
4471 }
4472
4473 /*
4474  * set pf load for the current pf.
4475  *
4476  * should be run under rtnl lock
4477  */
4478 void bnx2x_set_pf_load(struct bnx2x *bp)
4479 {
4480         u32 val1, val;
4481         u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
4482                              BNX2X_PATH0_LOAD_CNT_MASK;
4483         u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4484                              BNX2X_PATH0_LOAD_CNT_SHIFT;
4485
4486         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4487         val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4488
4489         DP(NETIF_MSG_IFUP, "Old GEN_REG_VAL=0x%08x\n", val);
4490
4491         /* get the current counter value */
4492         val1 = (val & mask) >> shift;
4493
4494         /* set bit of that PF */
4495         val1 |= (1 << bp->pf_num);
4496
4497         /* clear the old value */
4498         val &= ~mask;
4499
4500         /* set the new one */
4501         val |= ((val1 << shift) & mask);
4502
4503         REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4504         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4505 }
4506
4507 /**
4508  * bnx2x_clear_pf_load - clear pf load mark
4509  *
4510  * @bp:         driver handle
4511  *
4512  * Should be run under rtnl lock.
4513  * Decrements the load counter for the current engine. Returns
4514  * whether other functions are still loaded
4515  */
4516 bool bnx2x_clear_pf_load(struct bnx2x *bp)
4517 {
4518         u32 val1, val;
4519         u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
4520                              BNX2X_PATH0_LOAD_CNT_MASK;
4521         u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4522                              BNX2X_PATH0_LOAD_CNT_SHIFT;
4523
4524         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4525         val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4526         DP(NETIF_MSG_IFDOWN, "Old GEN_REG_VAL=0x%08x\n", val);
4527
4528         /* get the current counter value */
4529         val1 = (val & mask) >> shift;
4530
4531         /* clear bit of that PF */
4532         val1 &= ~(1 << bp->pf_num);
4533
4534         /* clear the old value */
4535         val &= ~mask;
4536
4537         /* set the new one */
4538         val |= ((val1 << shift) & mask);
4539
4540         REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4541         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4542         return val1 != 0;
4543 }
4544
4545 /*
4546  * Read the load status for the given engine.
4547  *
4548  * should be run under rtnl lock
4549  */
4550 static bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
4551 {
4552         u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
4553                              BNX2X_PATH0_LOAD_CNT_MASK);
4554         u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4555                              BNX2X_PATH0_LOAD_CNT_SHIFT);
4556         u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4557
4558         DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "GLOB_REG=0x%08x\n", val);
4559
4560         val = (val & mask) >> shift;
4561
4562         DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "load mask for engine %d = 0x%x\n",
4563            engine, val);
4564
4565         return val != 0;
4566 }
4567
4568 static void _print_parity(struct bnx2x *bp, u32 reg)
4569 {
4570         pr_cont(" [0x%08x] ", REG_RD(bp, reg));
4571 }
4572
4573 static void _print_next_block(int idx, const char *blk)
4574 {
4575         pr_cont("%s%s", idx ? ", " : "", blk);
4576 }
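/* Editor's note: together these helpers build the single-line parity report
 * started by netdev_err() in bnx2x_parity_attn() below, which ends up looking
 * roughly like:
 *
 *     Parity errors detected in blocks: BRB [0x00000001] , TSDM [0x00000010]
 *
 * with the block name coming from _print_next_block() and the raw *_PRTY_STS
 * register value from _print_parity().
 */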
4577
4578 static bool bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
4579                                             int *par_num, bool print)
4580 {
4581         u32 cur_bit;
4582         bool res;
4583         int i;
4584
4585         res = false;
4586
4587         for (i = 0; sig; i++) {
4588                 cur_bit = (0x1UL << i);
4589                 if (sig & cur_bit) {
4590                         res |= true; /* Each bit is a real error! */
4591
4592                         if (print) {
4593                                 switch (cur_bit) {
4594                                 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
4595                                         _print_next_block((*par_num)++, "BRB");
4596                                         _print_parity(bp,
4597                                                       BRB1_REG_BRB1_PRTY_STS);
4598                                         break;
4599                                 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
4600                                         _print_next_block((*par_num)++,
4601                                                           "PARSER");
4602                                         _print_parity(bp, PRS_REG_PRS_PRTY_STS);
4603                                         break;
4604                                 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
4605                                         _print_next_block((*par_num)++, "TSDM");
4606                                         _print_parity(bp,
4607                                                       TSDM_REG_TSDM_PRTY_STS);
4608                                         break;
4609                                 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
4610                                         _print_next_block((*par_num)++,
4611                                                           "SEARCHER");
4612                                         _print_parity(bp, SRC_REG_SRC_PRTY_STS);
4613                                         break;
4614                                 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
4615                                         _print_next_block((*par_num)++, "TCM");
4616                                         _print_parity(bp, TCM_REG_TCM_PRTY_STS);
4617                                         break;
4618                                 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
4619                                         _print_next_block((*par_num)++,
4620                                                           "TSEMI");
4621                                         _print_parity(bp,
4622                                                       TSEM_REG_TSEM_PRTY_STS_0);
4623                                         _print_parity(bp,
4624                                                       TSEM_REG_TSEM_PRTY_STS_1);
4625                                         break;
4626                                 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
4627                                         _print_next_block((*par_num)++, "XPB");
4628                                         _print_parity(bp, GRCBASE_XPB +
4629                                                           PB_REG_PB_PRTY_STS);
4630                                         break;
4631                                 }
4632                         }
4633
4634                         /* Clear the bit */
4635                         sig &= ~cur_bit;
4636                 }
4637         }
4638
4639         return res;
4640 }
4641
4642 static bool bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
4643                                             int *par_num, bool *global,
4644                                             bool print)
4645 {
4646         u32 cur_bit;
4647         bool res;
4648         int i;
4649
4650         res = false;
4651
4652         for (i = 0; sig; i++) {
4653                 cur_bit = (0x1UL << i);
4654                 if (sig & cur_bit) {
4655                         res |= true; /* Each bit is a real error! */
4656                         switch (cur_bit) {
4657                         case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
4658                                 if (print) {
4659                                         _print_next_block((*par_num)++, "PBF");
4660                                         _print_parity(bp, PBF_REG_PBF_PRTY_STS);
4661                                 }
4662                                 break;
4663                         case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
4664                                 if (print) {
4665                                         _print_next_block((*par_num)++, "QM");
4666                                         _print_parity(bp, QM_REG_QM_PRTY_STS);
4667                                 }
4668                                 break;
4669                         case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
4670                                 if (print) {
4671                                         _print_next_block((*par_num)++, "TM");
4672                                         _print_parity(bp, TM_REG_TM_PRTY_STS);
4673                                 }
4674                                 break;
4675                         case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
4676                                 if (print) {
4677                                         _print_next_block((*par_num)++, "XSDM");
4678                                         _print_parity(bp,
4679                                                       XSDM_REG_XSDM_PRTY_STS);
4680                                 }
4681                                 break;
4682                         case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
4683                                 if (print) {
4684                                         _print_next_block((*par_num)++, "XCM");
4685                                         _print_parity(bp, XCM_REG_XCM_PRTY_STS);
4686                                 }
4687                                 break;
4688                         case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
4689                                 if (print) {
4690                                         _print_next_block((*par_num)++,
4691                                                           "XSEMI");
4692                                         _print_parity(bp,
4693                                                       XSEM_REG_XSEM_PRTY_STS_0);
4694                                         _print_parity(bp,
4695                                                       XSEM_REG_XSEM_PRTY_STS_1);
4696                                 }
4697                                 break;
4698                         case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
4699                                 if (print) {
4700                                         _print_next_block((*par_num)++,
4701                                                           "DOORBELLQ");
4702                                         _print_parity(bp,
4703                                                       DORQ_REG_DORQ_PRTY_STS);
4704                                 }
4705                                 break;
4706                         case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
4707                                 if (print) {
4708                                         _print_next_block((*par_num)++, "NIG");
4709                                         if (CHIP_IS_E1x(bp)) {
4710                                                 _print_parity(bp,
4711                                                         NIG_REG_NIG_PRTY_STS);
4712                                         } else {
4713                                                 _print_parity(bp,
4714                                                         NIG_REG_NIG_PRTY_STS_0);
4715                                                 _print_parity(bp,
4716                                                         NIG_REG_NIG_PRTY_STS_1);
4717                                         }
4718                                 }
4719                                 break;
4720                         case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
4721                                 if (print)
4722                                         _print_next_block((*par_num)++,
4723                                                           "VAUX PCI CORE");
4724                                 *global = true;
4725                                 break;
4726                         case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
4727                                 if (print) {
4728                                         _print_next_block((*par_num)++,
4729                                                           "DEBUG");
4730                                         _print_parity(bp, DBG_REG_DBG_PRTY_STS);
4731                                 }
4732                                 break;
4733                         case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
4734                                 if (print) {
4735                                         _print_next_block((*par_num)++, "USDM");
4736                                         _print_parity(bp,
4737                                                       USDM_REG_USDM_PRTY_STS);
4738                                 }
4739                                 break;
4740                         case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
4741                                 if (print) {
4742                                         _print_next_block((*par_num)++, "UCM");
4743                                         _print_parity(bp, UCM_REG_UCM_PRTY_STS);
4744                                 }
4745                                 break;
4746                         case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
4747                                 if (print) {
4748                                         _print_next_block((*par_num)++,
4749                                                           "USEMI");
4750                                         _print_parity(bp,
4751                                                       USEM_REG_USEM_PRTY_STS_0);
4752                                         _print_parity(bp,
4753                                                       USEM_REG_USEM_PRTY_STS_1);
4754                                 }
4755                                 break;
4756                         case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
4757                                 if (print) {
4758                                         _print_next_block((*par_num)++, "UPB");
4759                                         _print_parity(bp, GRCBASE_UPB +
4760                                                           PB_REG_PB_PRTY_STS);
4761                                 }
4762                                 break;
4763                         case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
4764                                 if (print) {
4765                                         _print_next_block((*par_num)++, "CSDM");
4766                                         _print_parity(bp,
4767                                                       CSDM_REG_CSDM_PRTY_STS);
4768                                 }
4769                                 break;
4770                         case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
4771                                 if (print) {
4772                                         _print_next_block((*par_num)++, "CCM");
4773                                         _print_parity(bp, CCM_REG_CCM_PRTY_STS);
4774                                 }
4775                                 break;
4776                         }
4777
4778                         /* Clear the bit */
4779                         sig &= ~cur_bit;
4780                 }
4781         }
4782
4783         return res;
4784 }
4785
4786 static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
4787                                             int *par_num, bool print)
4788 {
4789         u32 cur_bit;
4790         bool res;
4791         int i;
4792
4793         res = false;
4794
4795         for (i = 0; sig; i++) {
4796                 cur_bit = (0x1UL << i);
4797                 if (sig & cur_bit) {
4798                         res = true; /* Each bit is a real error! */
4799                         if (print) {
4800                                 switch (cur_bit) {
4801                                 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
4802                                         _print_next_block((*par_num)++,
4803                                                           "CSEMI");
4804                                         _print_parity(bp,
4805                                                       CSEM_REG_CSEM_PRTY_STS_0);
4806                                         _print_parity(bp,
4807                                                       CSEM_REG_CSEM_PRTY_STS_1);
4808                                         break;
4809                                 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
4810                                         _print_next_block((*par_num)++, "PXP");
4811                                         _print_parity(bp, PXP_REG_PXP_PRTY_STS);
4812                                         _print_parity(bp,
4813                                                       PXP2_REG_PXP2_PRTY_STS_0);
4814                                         _print_parity(bp,
4815                                                       PXP2_REG_PXP2_PRTY_STS_1);
4816                                         break;
4817                                 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
4818                                         _print_next_block((*par_num)++,
4819                                                           "PXPPCICLOCKCLIENT");
4820                                         break;
4821                                 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
4822                                         _print_next_block((*par_num)++, "CFC");
4823                                         _print_parity(bp,
4824                                                       CFC_REG_CFC_PRTY_STS);
4825                                         break;
4826                                 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
4827                                         _print_next_block((*par_num)++, "CDU");
4828                                         _print_parity(bp, CDU_REG_CDU_PRTY_STS);
4829                                         break;
4830                                 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
4831                                         _print_next_block((*par_num)++, "DMAE");
4832                                         _print_parity(bp,
4833                                                       DMAE_REG_DMAE_PRTY_STS);
4834                                         break;
4835                                 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
4836                                         _print_next_block((*par_num)++, "IGU");
4837                                         if (CHIP_IS_E1x(bp))
4838                                                 _print_parity(bp,
4839                                                         HC_REG_HC_PRTY_STS);
4840                                         else
4841                                                 _print_parity(bp,
4842                                                         IGU_REG_IGU_PRTY_STS);
4843                                         break;
4844                                 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
4845                                         _print_next_block((*par_num)++, "MISC");
4846                                         _print_parity(bp,
4847                                                       MISC_REG_MISC_PRTY_STS);
4848                                         break;
4849                                 }
4850                         }
4851
4852                         /* Clear the bit */
4853                         sig &= ~cur_bit;
4854                 }
4855         }
4856
4857         return res;
4858 }
4859
4860 static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig,
4861                                             int *par_num, bool *global,
4862                                             bool print)
4863 {
4864         bool res = false;
4865         u32 cur_bit;
4866         int i;
4867
4868         for (i = 0; sig; i++) {
4869                 cur_bit = (0x1UL << i);
4870                 if (sig & cur_bit) {
4871                         switch (cur_bit) {
4872                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
4873                                 if (print)
4874                                         _print_next_block((*par_num)++,
4875                                                           "MCP ROM");
4876                                 *global = true;
4877                                 res = true;
4878                                 break;
4879                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
4880                                 if (print)
4881                                         _print_next_block((*par_num)++,
4882                                                           "MCP UMP RX");
4883                                 *global = true;
4884                                 res = true;
4885                                 break;
4886                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
4887                                 if (print)
4888                                         _print_next_block((*par_num)++,
4889                                                           "MCP UMP TX");
4890                                 *global = true;
4891                                 res = true;
4892                                 break;
4893                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
4894                                 (*par_num)++;
4895                                 /* clear latched SCPAD PARITY from MCP */
4896                                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
4897                                        1UL << 10);
4898                                 break;
4899                         }
4900
4901                         /* Clear the bit */
4902                         sig &= ~cur_bit;
4903                 }
4904         }
4905
4906         return res;
4907 }
4908
4909 static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
4910                                             int *par_num, bool print)
4911 {
4912         u32 cur_bit;
4913         bool res;
4914         int i;
4915
4916         res = false;
4917
4918         for (i = 0; sig; i++) {
4919                 cur_bit = (0x1UL << i);
4920                 if (sig & cur_bit) {
4921                         res = true; /* Each bit is a real error! */
4922                         if (print) {
4923                                 switch (cur_bit) {
4924                                 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
4925                                         _print_next_block((*par_num)++,
4926                                                           "PGLUE_B");
4927                                         _print_parity(bp,
4928                                                       PGLUE_B_REG_PGLUE_B_PRTY_STS);
4929                                         break;
4930                                 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
4931                                         _print_next_block((*par_num)++, "ATC");
4932                                         _print_parity(bp,
4933                                                       ATC_REG_ATC_PRTY_STS);
4934                                         break;
4935                                 }
4936                         }
4937                         /* Clear the bit */
4938                         sig &= ~cur_bit;
4939                 }
4940         }
4941
4942         return res;
4943 }
4944
4945 static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
4946                               u32 *sig)
4947 {
4948         bool res = false;
4949
4950         if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
4951             (sig[1] & HW_PRTY_ASSERT_SET_1) ||
4952             (sig[2] & HW_PRTY_ASSERT_SET_2) ||
4953             (sig[3] & HW_PRTY_ASSERT_SET_3) ||
4954             (sig[4] & HW_PRTY_ASSERT_SET_4)) {
4955                 int par_num = 0;
4956
4957                 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n"
4958                                  "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
4959                           sig[0] & HW_PRTY_ASSERT_SET_0,
4960                           sig[1] & HW_PRTY_ASSERT_SET_1,
4961                           sig[2] & HW_PRTY_ASSERT_SET_2,
4962                           sig[3] & HW_PRTY_ASSERT_SET_3,
4963                           sig[4] & HW_PRTY_ASSERT_SET_4);
4964                 if (print) {
4965                         if (((sig[0] & HW_PRTY_ASSERT_SET_0) ||
4966                              (sig[1] & HW_PRTY_ASSERT_SET_1) ||
4967                              (sig[2] & HW_PRTY_ASSERT_SET_2) ||
4968                              (sig[4] & HW_PRTY_ASSERT_SET_4)) ||
4969                              (sig[3] & HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD)) {
4970                                 netdev_err(bp->dev,
4971                                            "Parity errors detected in blocks: ");
4972                         } else {
4973                                 print = false;
4974                         }
4975                 }
4976                 res |= bnx2x_check_blocks_with_parity0(bp,
4977                         sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print);
4978                 res |= bnx2x_check_blocks_with_parity1(bp,
4979                         sig[1] & HW_PRTY_ASSERT_SET_1, &par_num, global, print);
4980                 res |= bnx2x_check_blocks_with_parity2(bp,
4981                         sig[2] & HW_PRTY_ASSERT_SET_2, &par_num, print);
4982                 res |= bnx2x_check_blocks_with_parity3(bp,
4983                         sig[3] & HW_PRTY_ASSERT_SET_3, &par_num, global, print);
4984                 res |= bnx2x_check_blocks_with_parity4(bp,
4985                         sig[4] & HW_PRTY_ASSERT_SET_4, &par_num, print);
4986
4987                 if (print)
4988                         pr_cont("\n");
4989         }
4990
4991         return res;
4992 }
4993
4994 /**
4995  * bnx2x_chk_parity_attn - checks for parity attentions.
4996  *
4997  * @bp:         driver handle
4998  * @global:     true if there was a global attention
4999  * @print:      show parity attention in syslog
5000  */
5001 bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
5002 {
5003         struct attn_route attn = { {0} };
5004         int port = BP_PORT(bp);
5005
5006         attn.sig[0] = REG_RD(bp,
5007                 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
5008                              port*4);
5009         attn.sig[1] = REG_RD(bp,
5010                 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
5011                              port*4);
5012         attn.sig[2] = REG_RD(bp,
5013                 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
5014                              port*4);
5015         attn.sig[3] = REG_RD(bp,
5016                 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
5017                              port*4);
5018         /* Since MCP attentions can't be disabled inside the block, we need to
5019          * read AEU registers to see whether they're currently disabled; the mask
5020          * below keeps non-MCP bits and only the MCP parity bits enabled there. */
5021         attn.sig[3] &= ((REG_RD(bp,
5022                                 !port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
5023                                       : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0) &
5024                          MISC_AEU_ENABLE_MCP_PRTY_BITS) |
5025                         ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
5026
5027         if (!CHIP_IS_E1x(bp))
5028                 attn.sig[4] = REG_RD(bp,
5029                         MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 +
5030                                      port*4);
5031
5032         return bnx2x_parity_attn(bp, global, print, attn.sig);
5033 }
5034
5035 static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
5036 {
5037         u32 val;
5038         if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
5039
5040                 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
5041                 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
5042                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
5043                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
5044                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
5045                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
5046                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
5047                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
5048                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
5049                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
5050                 if (val &
5051                     PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
5052                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
5053                 if (val &
5054                     PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
5055                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
5056                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
5057                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
5058                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
5059                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
5060                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
5061                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
5062         }
5063         if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
5064                 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
5065                 BNX2X_ERR("ATC hw attention 0x%x\n", val);
5066                 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
5067                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
5068                 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
5069                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
5070                 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
5071                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
5072                 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
5073                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
5074                 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
5075                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
5076                 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
5077                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
5078         }
5079
5080         if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
5081                     AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
5082                 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
5083                 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
5084                     AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
5085         }
5086 }
5087
5088 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
5089 {
5090         struct attn_route attn, *group_mask;
5091         int port = BP_PORT(bp);
5092         int index;
5093         u32 reg_addr;
5094         u32 val;
5095         u32 aeu_mask;
5096         bool global = false;
5097
5098         /* need to take HW lock because MCP or other port might also
5099            try to handle this event */
5100         bnx2x_acquire_alr(bp);
5101
5102         if (bnx2x_chk_parity_attn(bp, &global, true)) {
5103 #ifndef BNX2X_STOP_ON_ERROR
5104                 bp->recovery_state = BNX2X_RECOVERY_INIT;
5105                 schedule_delayed_work(&bp->sp_rtnl_task, 0);
5106                 /* Disable HW interrupts */
5107                 bnx2x_int_disable(bp);
5108                 /* In case of parity errors don't handle attentions so that
5109                  * other functions can still "see" the parity errors.
5110                  */
5111 #else
5112                 bnx2x_panic();
5113 #endif
5114                 bnx2x_release_alr(bp);
5115                 return;
5116         }
5117
5118         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
5119         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
5120         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
5121         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
5122         if (!CHIP_IS_E1x(bp))
5123                 attn.sig[4] =
5124                       REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
5125         else
5126                 attn.sig[4] = 0;
5127
5128         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
5129            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
5130
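        /* Editor's note: each deasserted bit selects one of the dynamic
         * attention groups configured at init time; only the signals enabled
         * in that group's mask (bp->attn_group[index]) are handled below.
         */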
5131         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
5132                 if (deasserted & (1 << index)) {
5133                         group_mask = &bp->attn_group[index];
5134
5135                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x %08x\n",
5136                            index,
5137                            group_mask->sig[0], group_mask->sig[1],
5138                            group_mask->sig[2], group_mask->sig[3],
5139                            group_mask->sig[4]);
5140
5141                         bnx2x_attn_int_deasserted4(bp,
5142                                         attn.sig[4] & group_mask->sig[4]);
5143                         bnx2x_attn_int_deasserted3(bp,
5144                                         attn.sig[3] & group_mask->sig[3]);
5145                         bnx2x_attn_int_deasserted1(bp,
5146                                         attn.sig[1] & group_mask->sig[1]);
5147                         bnx2x_attn_int_deasserted2(bp,
5148                                         attn.sig[2] & group_mask->sig[2]);
5149                         bnx2x_attn_int_deasserted0(bp,
5150                                         attn.sig[0] & group_mask->sig[0]);
5151                 }
5152         }
5153
5154         bnx2x_release_alr(bp);
5155
5156         if (bp->common.int_block == INT_BLOCK_HC)
5157                 reg_addr = (HC_REG_COMMAND_REG + port*32 +
5158                             COMMAND_REG_ATTN_BITS_CLR);
5159         else
5160                 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
5161
5162         val = ~deasserted;
5163         DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
5164            (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
5165         REG_WR(bp, reg_addr, val);
5166
5167         if (~bp->attn_state & deasserted)
5168                 BNX2X_ERR("IGU ERROR\n");
5169
5170         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5171                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
5172
5173         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
5174         aeu_mask = REG_RD(bp, reg_addr);
5175
5176         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
5177            aeu_mask, deasserted);
5178         aeu_mask |= (deasserted & 0x3ff);
5179         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
5180
5181         REG_WR(bp, reg_addr, aeu_mask);
5182         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
5183
5184         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
5185         bp->attn_state &= ~deasserted;
5186         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
5187 }
5188
5189 static void bnx2x_attn_int(struct bnx2x *bp)
5190 {
5191         /* read local copy of bits */
5192         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
5193                                                                 attn_bits);
5194         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
5195                                                                 attn_bits_ack);
5196         u32 attn_state = bp->attn_state;
5197
5198         /* look for changed bits */
5199         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
5200         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
5201
5202         DP(NETIF_MSG_HW,
5203            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
5204            attn_bits, attn_ack, asserted, deasserted);
5205
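        /* Sanity check: on any bit where the HW attention status and its ack
         * already agree (no transition in flight), our soft attn_state must
         * agree with them as well; a mismatch means the bookkeeping is stale.
         */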
5206         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
5207                 BNX2X_ERR("BAD attention state\n");
5208
5209         /* handle bits that were raised */
5210         if (asserted)
5211                 bnx2x_attn_int_asserted(bp, asserted);
5212
5213         if (deasserted)
5214                 bnx2x_attn_int_deasserted(bp, deasserted);
5215 }
5216
5217 void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
5218                       u16 index, u8 op, u8 update)
5219 {
5220         u32 igu_addr = bp->igu_base_addr;
5221         igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
5222         bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
5223                              igu_addr);
5224 }
5225
5226 static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
5227 {
5228         /* No memory barriers */
5229         storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
5230         mmiowb(); /* keep prod updates ordered */
5231 }
5232
5233 static int  bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
5234                                       union event_ring_elem *elem)
5235 {
5236         u8 err = elem->message.error;
5237
5238         if (!bp->cnic_eth_dev.starting_cid  ||
5239             (cid < bp->cnic_eth_dev.starting_cid &&
5240             cid != bp->cnic_eth_dev.iscsi_l2_cid))
5241                 return 1;
5242
5243         DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
5244
5245         if (unlikely(err)) {
5246
5247                 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
5248                           cid);
5249                 bnx2x_panic_dump(bp, false);
5250         }
5251         bnx2x_cnic_cfc_comp(bp, cid, err);
5252         return 0;
5253 }
5254
5255 static void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
5256 {
5257         struct bnx2x_mcast_ramrod_params rparam;
5258         int rc;
5259
5260         memset(&rparam, 0, sizeof(rparam));
5261
5262         rparam.mcast_obj = &bp->mcast_obj;
5263
5264         netif_addr_lock_bh(bp->dev);
5265
5266         /* Clear pending state for the last command */
5267         bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw);
5268
5269         /* If there are pending mcast commands - send them */
5270         if (bp->mcast_obj.check_pending(&bp->mcast_obj)) {
5271                 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
5272                 if (rc < 0)
5273                         BNX2X_ERR("Failed to send pending mcast commands: %d\n",
5274                                   rc);
5275         }
5276
5277         netif_addr_unlock_bh(bp->dev);
5278 }
5279
5280 static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
5281                                             union event_ring_elem *elem)
5282 {
5283         unsigned long ramrod_flags = 0;
5284         int rc = 0;
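        /* Editor's note: the 32-bit echo value packs the pending filter type
         * in its upper bits and the software CID in the lower bits, hence the
         * BNX2X_SWCID_MASK extraction here and the shift in the switch below.
         */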
5285         u32 echo = le32_to_cpu(elem->message.data.eth_event.echo);
5286         u32 cid = echo & BNX2X_SWCID_MASK;
5287         struct bnx2x_vlan_mac_obj *vlan_mac_obj;
5288
5289         /* Always push next commands out, don't wait here */
5290         __set_bit(RAMROD_CONT, &ramrod_flags);
5291
5292         switch (echo >> BNX2X_SWCID_SHIFT) {
5293         case BNX2X_FILTER_MAC_PENDING:
5294                 DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
5295                 if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
5296                         vlan_mac_obj = &bp->iscsi_l2_mac_obj;
5297                 else
5298                         vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
5299
5300                 break;
5301         case BNX2X_FILTER_VLAN_PENDING:
5302                 DP(BNX2X_MSG_SP, "Got SETUP_VLAN completions\n");
5303                 vlan_mac_obj = &bp->sp_objs[cid].vlan_obj;
5304                 break;
5305         case BNX2X_FILTER_MCAST_PENDING:
5306                 DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n");
5307                 /* This is only relevant for 57710 where multicast MACs are
5308                  * configured as unicast MACs using the same ramrod.
5309                  */
5310                 bnx2x_handle_mcast_eqe(bp);
5311                 return;
5312         default:
5313                 BNX2X_ERR("Unsupported classification command: 0x%x\n", echo);
5314                 return;
5315         }
5316
5317         rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags);
5318
5319         if (rc < 0)
5320                 BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
5321         else if (rc > 0)
5322                 DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n");
5323 }
5324
5325 static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
5326
5327 static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
5328 {
5329         netif_addr_lock_bh(bp->dev);
5330
5331         clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
5332
5333         /* Send rx_mode command again if it was requested */
5334         if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
5335                 bnx2x_set_storm_rx_mode(bp);
5336         else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
5337                                     &bp->sp_state))
5338                 bnx2x_set_iscsi_eth_rx_mode(bp, true);
5339         else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
5340                                     &bp->sp_state))
5341                 bnx2x_set_iscsi_eth_rx_mode(bp, false);
5342
5343         netif_addr_unlock_bh(bp->dev);
5344 }
5345
5346 static void bnx2x_after_afex_vif_lists(struct bnx2x *bp,
5347                                               union event_ring_elem *elem)
5348 {
5349         if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) {
5350                 DP(BNX2X_MSG_SP,
5351                    "afex: ramrod completed VIF LIST_GET, addrs 0x%x\n",
5352                    elem->message.data.vif_list_event.func_bit_map);
5353                 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK,
5354                         elem->message.data.vif_list_event.func_bit_map);
5355         } else if (elem->message.data.vif_list_event.echo ==
5356                    VIF_LIST_RULE_SET) {
5357                 DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n");
5358                 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0);
5359         }
5360 }
5361
5362 /* called with rtnl_lock */
5363 static void bnx2x_after_function_update(struct bnx2x *bp)
5364 {
5365         int q, rc;
5366         struct bnx2x_fastpath *fp;
5367         struct bnx2x_queue_state_params queue_params = {NULL};
5368         struct bnx2x_queue_update_params *q_update_params =
5369                 &queue_params.params.update;
5370
5371         /* Send Q update command with afex vlan removal values for all Qs */
5372         queue_params.cmd = BNX2X_Q_CMD_UPDATE;
5373
5374         /* set silent vlan removal values according to vlan mode */
5375         __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
5376                   &q_update_params->update_flags);
5377         __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
5378                   &q_update_params->update_flags);
5379         __set_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
5380
5381         /* in access mode mark mask and value are 0 to strip all vlans */
5382         if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) {
5383                 q_update_params->silent_removal_value = 0;
5384                 q_update_params->silent_removal_mask = 0;
5385         } else {
5386                 q_update_params->silent_removal_value =
5387                         (bp->afex_def_vlan_tag & VLAN_VID_MASK);
5388                 q_update_params->silent_removal_mask = VLAN_VID_MASK;
5389         }
5390
5391         for_each_eth_queue(bp, q) {
5392                 /* Set the appropriate Queue object */
5393                 fp = &bp->fp[q];
5394                 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
5395
5396                 /* send the ramrod */
5397                 rc = bnx2x_queue_state_change(bp, &queue_params);
5398                 if (rc < 0)
5399                         BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
5400                                   q);
5401         }
5402
5403         if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) {
5404                 fp = &bp->fp[FCOE_IDX(bp)];
5405                 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
5406
5407                 /* clear pending completion bit */
5408                 __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
5409
5410                 /* mark latest Q bit */
5411                 smp_mb__before_atomic();
5412                 set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
5413                 smp_mb__after_atomic();
5414
5415                 /* send Q update ramrod for FCoE Q */
5416                 rc = bnx2x_queue_state_change(bp, &queue_params);
5417                 if (rc < 0)
5418                         BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
5419                                   q);
5420         } else {
5421                 /* If no FCoE ring - ACK MCP now */
5422                 bnx2x_link_report(bp);
5423                 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
5424         }
5425 }
5426
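/* Map a CID to its queue state object; the FCoE L2 CID has its own object
 * outside the sp_objs array.
 */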
5427 static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
5428         struct bnx2x *bp, u32 cid)
5429 {
5430         DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
5431
5432         if (CNIC_LOADED(bp) && (cid == BNX2X_FCOE_ETH_CID(bp)))
5433                 return &bnx2x_fcoe_sp_obj(bp, q_obj);
5434         else
5435                 return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
5436 }
5437
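/* Process the slowpath event queue (EQ): walk the ring from sw_cons to
 * hw_cons, dispatch each element by opcode (and current bp->state), return
 * the consumed entries to eq_spq_left and update the EQ producer.
 */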
5438 static void bnx2x_eq_int(struct bnx2x *bp)
5439 {
5440         u16 hw_cons, sw_cons, sw_prod;
5441         union event_ring_elem *elem;
5442         u8 echo;
5443         u32 cid;
5444         u8 opcode;
5445         int rc, spqe_cnt = 0;
5446         struct bnx2x_queue_sp_obj *q_obj;
5447         struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
5448         struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;
5449
5450         hw_cons = le16_to_cpu(*bp->eq_cons_sb);
5451
5452         /* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
5453          * When we get the next-page element we need to adjust so the loop
5454          * condition below will be met. The next-page element is the size of a
5455          * regular element, hence we increment by 1 to skip it.
5456          */
5457         if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
5458                 hw_cons++;
5459
5460         /* This function may never run in parallel with itself for a
5461          * specific bp, thus there is no need for a "paired" read memory
5462          * barrier here.
5463          */
5464         sw_cons = bp->eq_cons;
5465         sw_prod = bp->eq_prod;
5466
5467         DP(BNX2X_MSG_SP, "EQ:  hw_cons %u  sw_cons %u bp->eq_spq_left %x\n",
5468                         hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
5469
5470         for (; sw_cons != hw_cons;
5471               sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
5472
5473                 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
5474
5475                 rc = bnx2x_iov_eq_sp_event(bp, elem);
5476                 if (!rc) {
5477                         DP(BNX2X_MSG_IOV, "bnx2x_iov_eq_sp_event returned %d\n",
5478                            rc);
5479                         goto next_spqe;
5480                 }
5481
5482                 opcode = elem->message.opcode;
5483
5484                 /* handle eq element */
5485                 switch (opcode) {
5486                 case EVENT_RING_OPCODE_VF_PF_CHANNEL:
5487                         bnx2x_vf_mbx_schedule(bp,
5488                                               &elem->message.data.vf_pf_event);
5489                         continue;
5490
5491                 case EVENT_RING_OPCODE_STAT_QUERY:
5492                         DP_AND((BNX2X_MSG_SP | BNX2X_MSG_STATS),
5493                                "got statistics comp event %d\n",
5494                                bp->stats_comp++);
5495                         /* nothing to do with stats comp */
5496                         goto next_spqe;
5497
5498                 case EVENT_RING_OPCODE_CFC_DEL:
5499                         /* handle according to cid range */
5500                         /*
5501                          * we may want to verify here that the bp state is
5502                          * HALTING
5503                          */
5504
5505                         /* elem CID originates from FW; actually LE */
5506                         cid = SW_CID(elem->message.data.cfc_del_event.cid);
5507
5508                         DP(BNX2X_MSG_SP,
5509                            "got delete ramrod for MULTI[%d]\n", cid);
5510
5511                         if (CNIC_LOADED(bp) &&
5512                             !bnx2x_cnic_handle_cfc_del(bp, cid, elem))
5513                                 goto next_spqe;
5514
5515                         q_obj = bnx2x_cid_to_q_obj(bp, cid);
5516
5517                         if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
5518                                 break;
5519
5520                         goto next_spqe;
5521
5522                 case EVENT_RING_OPCODE_STOP_TRAFFIC:
5523                         DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n");
5524                         bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
5525                         if (f_obj->complete_cmd(bp, f_obj,
5526                                                 BNX2X_F_CMD_TX_STOP))
5527                                 break;
5528                         goto next_spqe;
5529
5530                 case EVENT_RING_OPCODE_START_TRAFFIC:
5531                         DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n");
5532                         bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
5533                         if (f_obj->complete_cmd(bp, f_obj,
5534                                                 BNX2X_F_CMD_TX_START))
5535                                 break;
5536                         goto next_spqe;
5537
5538                 case EVENT_RING_OPCODE_FUNCTION_UPDATE:
5539                         echo = elem->message.data.function_update_event.echo;
5540                         if (echo == SWITCH_UPDATE) {
5541                                 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5542                                    "got FUNC_SWITCH_UPDATE ramrod\n");
5543                                 if (f_obj->complete_cmd(
5544                                         bp, f_obj, BNX2X_F_CMD_SWITCH_UPDATE))
5545                                         break;
5546
5547                         } else {
5548                                 int cmd = BNX2X_SP_RTNL_AFEX_F_UPDATE;
5549
5550                                 DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
5551                                    "AFEX: ramrod completed FUNCTION_UPDATE\n");
5552                                 f_obj->complete_cmd(bp, f_obj,
5553                                                     BNX2X_F_CMD_AFEX_UPDATE);
5554
5555                                 /* We will perform the Queues update from
5556                                  * sp_rtnl task as all Queue SP operations
5557                                  * should run under rtnl_lock.
5558                                  */
5559                                 bnx2x_schedule_sp_rtnl(bp, cmd, 0);
5560                         }
5561
5562                         goto next_spqe;
5563
5564                 case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
5565                         f_obj->complete_cmd(bp, f_obj,
5566                                             BNX2X_F_CMD_AFEX_VIFLISTS);
5567                         bnx2x_after_afex_vif_lists(bp, elem);
5568                         goto next_spqe;
5569                 case EVENT_RING_OPCODE_FUNCTION_START:
5570                         DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5571                            "got FUNC_START ramrod\n");
5572                         if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START))
5573                                 break;
5574
5575                         goto next_spqe;
5576
5577                 case EVENT_RING_OPCODE_FUNCTION_STOP:
5578                         DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5579                            "got FUNC_STOP ramrod\n");
5580                         if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP))
5581                                 break;
5582
5583                         goto next_spqe;
5584
5585                 case EVENT_RING_OPCODE_SET_TIMESYNC:
5586                         DP(BNX2X_MSG_SP | BNX2X_MSG_PTP,
5587                            "got set_timesync ramrod completion\n");
5588                         if (f_obj->complete_cmd(bp, f_obj,
5589                                                 BNX2X_F_CMD_SET_TIMESYNC))
5590                                 break;
5591                         goto next_spqe;
5592                 }
5593
5594                 switch (opcode | bp->state) {
5595                 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5596                       BNX2X_STATE_OPEN):
5597                 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5598                       BNX2X_STATE_OPENING_WAIT4_PORT):
5599                 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5600                       BNX2X_STATE_CLOSING_WAIT4_HALT):
5601                         DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
5602                            SW_CID(elem->message.data.eth_event.echo));
5603                         rss_raw->clear_pending(rss_raw);
5604                         break;
5605
5606                 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
5607                 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
5608                 case (EVENT_RING_OPCODE_SET_MAC |
5609                       BNX2X_STATE_CLOSING_WAIT4_HALT):
5610                 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5611                       BNX2X_STATE_OPEN):
5612                 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5613                       BNX2X_STATE_DIAG):
5614                 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5615                       BNX2X_STATE_CLOSING_WAIT4_HALT):
5616                         DP(BNX2X_MSG_SP, "got (un)set vlan/mac ramrod\n");
5617                         bnx2x_handle_classification_eqe(bp, elem);
5618                         break;
5619
5620                 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5621                       BNX2X_STATE_OPEN):
5622                 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5623                       BNX2X_STATE_DIAG):
5624                 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5625                       BNX2X_STATE_CLOSING_WAIT4_HALT):
5626                         DP(BNX2X_MSG_SP, "got mcast ramrod\n");
5627                         bnx2x_handle_mcast_eqe(bp);
5628                         break;
5629
5630                 case (EVENT_RING_OPCODE_FILTERS_RULES |
5631                       BNX2X_STATE_OPEN):
5632                 case (EVENT_RING_OPCODE_FILTERS_RULES |
5633                       BNX2X_STATE_DIAG):
5634                 case (EVENT_RING_OPCODE_FILTERS_RULES |
5635                       BNX2X_STATE_CLOSING_WAIT4_HALT):
5636                         DP(BNX2X_MSG_SP, "got rx_mode ramrod\n");
5637                         bnx2x_handle_rx_mode_eqe(bp);
5638                         break;
5639                 default:
5640                         /* unknown event: log an error and continue */
5641                         BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n",
5642                                   elem->message.opcode, bp->state);
5643                 }
5644 next_spqe:
5645                 spqe_cnt++;
5646         } /* for */
5647
5648         smp_mb__before_atomic();
5649         atomic_add(spqe_cnt, &bp->eq_spq_left);
5650
5651         bp->eq_cons = sw_cons;
5652         bp->eq_prod = sw_prod;
5653         /* Make sure the above memory writes have been issued before updating the producer */
5654         smp_wmb();
5655
5656         /* update producer */
5657         bnx2x_update_eq_prod(bp, bp->eq_prod);
5658 }
5659
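/* Slowpath work item: on an SP interrupt, read the default status block,
 * handle HW attentions and EQ completions (kicking the FCoE NAPI context if
 * it has work), ack the status block and re-enable the IGU line, then poll
 * for a pending AFEX VIFSET ACK towards the MFW.
 */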
5660 static void bnx2x_sp_task(struct work_struct *work)
5661 {
5662         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
5663
5664         DP(BNX2X_MSG_SP, "sp task invoked\n");
5665
5666         /* make sure the atomic interrupt_occurred has been written */
5667         smp_rmb();
5668         if (atomic_read(&bp->interrupt_occurred)) {
5669
5670                 /* what work needs to be performed? */
5671                 u16 status = bnx2x_update_dsb_idx(bp);
5672
5673                 DP(BNX2X_MSG_SP, "status %x\n", status);
5674                 DP(BNX2X_MSG_SP, "setting interrupt_occurred to 0\n");
5675                 atomic_set(&bp->interrupt_occurred, 0);
5676
5677                 /* HW attentions */
5678                 if (status & BNX2X_DEF_SB_ATT_IDX) {
5679                         bnx2x_attn_int(bp);
5680                         status &= ~BNX2X_DEF_SB_ATT_IDX;
5681                 }
5682
5683                 /* SP events: STAT_QUERY and others */
5684                 if (status & BNX2X_DEF_SB_IDX) {
5685                         struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
5686
5687                         if (FCOE_INIT(bp) &&
5688                             (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
5689                                 /* Prevent local bottom-halves from running as
5690                                  * we are going to change the local NAPI list.
5691                                  */
5692                                 local_bh_disable();
5693                                 napi_schedule(&bnx2x_fcoe(bp, napi));
5694                                 local_bh_enable();
5695                         }
5696
5697                         /* Handle EQ completions */
5698                         bnx2x_eq_int(bp);
5699                         bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
5700                                      le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
5701
5702                         status &= ~BNX2X_DEF_SB_IDX;
5703                 }
5704
5705                 /* if status is non-zero then perhaps something went wrong */
5706                 if (unlikely(status))
5707                         DP(BNX2X_MSG_SP,
5708                            "got an unknown interrupt! (status 0x%x)\n", status);
5709
5710                 /* ack status block only if something was actually handled */
5711                 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
5712                              le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
5713         }
5714
5715         /* afex - poll to check if VIFSET_ACK should be sent to MFW */
5716         if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
5717                                &bp->sp_state)) {
5718                 bnx2x_link_report(bp);
5719                 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
5720         }
5721 }
5722
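/* MSI-X slowpath interrupt handler: mask the default status block, let CNIC
 * handle its portion if loaded, and defer the rest to the sp task.
 */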
5723 irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
5724 {
5725         struct net_device *dev = dev_instance;
5726         struct bnx2x *bp = netdev_priv(dev);
5727
5728         bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
5729                      IGU_INT_DISABLE, 0);
5730
5731 #ifdef BNX2X_STOP_ON_ERROR
5732         if (unlikely(bp->panic))
5733                 return IRQ_HANDLED;
5734 #endif
5735
5736         if (CNIC_LOADED(bp)) {
5737                 struct cnic_ops *c_ops;
5738
5739                 rcu_read_lock();
5740                 c_ops = rcu_dereference(bp->cnic_ops);
5741                 if (c_ops)
5742                         c_ops->cnic_handler(bp->cnic_data, NULL);
5743                 rcu_read_unlock();
5744         }
5745
5746         /* schedule sp task to perform default status block work, ack
5747          * attentions and enable interrupts.
5748          */
5749         bnx2x_schedule_sp_task(bp);
5750
5751         return IRQ_HANDLED;
5752 }
5753
5754 /* end of slow path */
5755
5756 void bnx2x_drv_pulse(struct bnx2x *bp)
5757 {
5758         SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
5759                  bp->fw_drv_pulse_wr_seq);
5760 }
5761
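/* Periodic driver timer: maintain the driver/MCP pulse handshake on the PF,
 * trigger a statistics update while the device is open, sample the PF-VF
 * bulletin board on a VF and re-arm itself.
 */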
5762 static void bnx2x_timer(unsigned long data)
5763 {
5764         struct bnx2x *bp = (struct bnx2x *) data;
5765
5766         if (!netif_running(bp->dev))
5767                 return;
5768
5769         if (IS_PF(bp) &&
5770             !BP_NOMCP(bp)) {
5771                 int mb_idx = BP_FW_MB_IDX(bp);
5772                 u16 drv_pulse;
5773                 u16 mcp_pulse;
5774
5775                 ++bp->fw_drv_pulse_wr_seq;
5776                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5777                 drv_pulse = bp->fw_drv_pulse_wr_seq;
5778                 bnx2x_drv_pulse(bp);
5779
5780                 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
5781                              MCP_PULSE_SEQ_MASK);
5782                 /* The delta between driver pulse and mcp response
5783                  * should not get too big. If the MFW is more than 5 pulses
5784                  * behind, we should worry about it enough to generate an error
5785                  * log.
5786                  */
5787                 if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5)
5788                         BNX2X_ERR("MFW seems hung: drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5789                                   drv_pulse, mcp_pulse);
5790         }
5791
5792         if (bp->state == BNX2X_STATE_OPEN)
5793                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
5794
5795         /* sample the PF-VF bulletin board for new posts from the PF */
5796         if (IS_VF(bp))
5797                 bnx2x_timer_sriov(bp);
5798
5799         mod_timer(&bp->timer, jiffies + bp->current_interval);
5800 }
5801
5802 /* end of Statistics */
5803
5804 /* nic init */
5805
5806 /*
5807  * nic init service functions
5808  */
5809
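/* Fill 'len' bytes at 'addr' with 'fill'; use dword writes when both length
 * and address are 4-byte aligned, byte writes otherwise.
 */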
5810 static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
5811 {
5812         u32 i;
5813         if (!(len%4) && !(addr%4))
5814                 for (i = 0; i < len; i += 4)
5815                         REG_WR(bp, addr + i, fill);
5816         else
5817                 for (i = 0; i < len; i++)
5818                         REG_WR8(bp, addr + i, fill);
5819 }
5820
5821 /* helper: writes FP SP data to FW - data_size in dwords */
5822 static void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
5823                                 int fw_sb_id,
5824                                 u32 *sb_data_p,
5825                                 u32 data_size)
5826 {
5827         int index;
5828         for (index = 0; index < data_size; index++)
5829                 REG_WR(bp, BAR_CSTRORM_INTMEM +
5830                         CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
5831                         sizeof(u32)*index,
5832                         *(sb_data_p + index));
5833 }
5834
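/* Disable and zero a fastpath status block in the chip (E2 or E1x layout)
 * before it gets (re)initialized.
 */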
5835 static void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
5836 {
5837         u32 *sb_data_p;
5838         u32 data_size = 0;
5839         struct hc_status_block_data_e2 sb_data_e2;
5840         struct hc_status_block_data_e1x sb_data_e1x;
5841
5842         /* disable the function first */
5843         if (!CHIP_IS_E1x(bp)) {
5844                 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
5845                 sb_data_e2.common.state = SB_DISABLED;
5846                 sb_data_e2.common.p_func.vf_valid = false;
5847                 sb_data_p = (u32 *)&sb_data_e2;
5848                 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
5849         } else {
5850                 memset(&sb_data_e1x, 0,
5851                        sizeof(struct hc_status_block_data_e1x));
5852                 sb_data_e1x.common.state = SB_DISABLED;
5853                 sb_data_e1x.common.p_func.vf_valid = false;
5854                 sb_data_p = (u32 *)&sb_data_e1x;
5855                 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
5856         }
5857         bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
5858
5859         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5860                         CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
5861                         CSTORM_STATUS_BLOCK_SIZE);
5862         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5863                         CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
5864                         CSTORM_SYNC_BLOCK_SIZE);
5865 }
5866
5867 /* helper:  writes SP SB data to FW */
5868 static void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
5869                 struct hc_sp_status_block_data *sp_sb_data)
5870 {
5871         int func = BP_FUNC(bp);
5872         int i;
5873         for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
5874                 REG_WR(bp, BAR_CSTRORM_INTMEM +
5875                         CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
5876                         i*sizeof(u32),
5877                         *((u32 *)sp_sb_data + i));
5878 }
5879
5880 static void bnx2x_zero_sp_sb(struct bnx2x *bp)
5881 {
5882         int func = BP_FUNC(bp);
5883         struct hc_sp_status_block_data sp_sb_data;
5884         memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
5885
5886         sp_sb_data.state = SB_DISABLED;
5887         sp_sb_data.p_func.vf_valid = false;
5888
5889         bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
5890
5891         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5892                         CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
5893                         CSTORM_SP_STATUS_BLOCK_SIZE);
5894         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5895                         CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
5896                         CSTORM_SP_SYNC_BLOCK_SIZE);
5897 }
5898
5899 static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
5900                                            int igu_sb_id, int igu_seg_id)
5901 {
5902         hc_sm->igu_sb_id = igu_sb_id;
5903         hc_sm->igu_seg_id = igu_seg_id;
5904         hc_sm->timer_value = 0xFF;
5905         hc_sm->time_to_expire = 0xFFFFFFFF;
5906 }
5907
5908 /* maps status block indices to state machine ids. */
5909 static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
5910 {
5911         /* zero out state machine indices */
5912         /* rx indices */
5913         index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
5914
5915         /* tx indices */
5916         index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
5917         index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
5918         index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
5919         index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
5920
5921         /* map indices */
5922         /* rx indices */
5923         index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
5924                 SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5925
5926         /* tx indices */
5927         index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
5928                 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5929         index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
5930                 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5931         index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
5932                 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5933         index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
5934                 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5935 }
5936
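/* Initialize a fastpath status block: build the per-chip SB data, map the
 * rx/tx indices to the SM_RX/SM_TX state machines and write the result to
 * the chip.
 */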
5937 void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
5938                           u8 vf_valid, int fw_sb_id, int igu_sb_id)
5939 {
5940         int igu_seg_id;
5941
5942         struct hc_status_block_data_e2 sb_data_e2;
5943         struct hc_status_block_data_e1x sb_data_e1x;
5944         struct hc_status_block_sm  *hc_sm_p;
5945         int data_size;
5946         u32 *sb_data_p;
5947
5948         if (CHIP_INT_MODE_IS_BC(bp))
5949                 igu_seg_id = HC_SEG_ACCESS_NORM;
5950         else
5951                 igu_seg_id = IGU_SEG_ACCESS_NORM;
5952
5953         bnx2x_zero_fp_sb(bp, fw_sb_id);
5954
5955         if (!CHIP_IS_E1x(bp)) {
5956                 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
5957                 sb_data_e2.common.state = SB_ENABLED;
5958                 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
5959                 sb_data_e2.common.p_func.vf_id = vfid;
5960                 sb_data_e2.common.p_func.vf_valid = vf_valid;
5961                 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
5962                 sb_data_e2.common.same_igu_sb_1b = true;
5963                 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
5964                 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
5965                 hc_sm_p = sb_data_e2.common.state_machine;
5966                 sb_data_p = (u32 *)&sb_data_e2;
5967                 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
5968                 bnx2x_map_sb_state_machines(sb_data_e2.index_data);
5969         } else {
5970                 memset(&sb_data_e1x, 0,
5971                        sizeof(struct hc_status_block_data_e1x));
5972                 sb_data_e1x.common.state = SB_ENABLED;
5973                 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
5974                 sb_data_e1x.common.p_func.vf_id = 0xff;
5975                 sb_data_e1x.common.p_func.vf_valid = false;
5976                 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
5977                 sb_data_e1x.common.same_igu_sb_1b = true;
5978                 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
5979                 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
5980                 hc_sm_p = sb_data_e1x.common.state_machine;
5981                 sb_data_p = (u32 *)&sb_data_e1x;
5982                 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
5983                 bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
5984         }
5985
5986         bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
5987                                        igu_sb_id, igu_seg_id);
5988         bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
5989                                        igu_sb_id, igu_seg_id);
5990
5991         DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id);
5992
5993         /* write indices to HW - PCI guarantees endianness of regpairs */
5994         bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
5995 }
5996
5997 static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id,
5998                                      u16 tx_usec, u16 rx_usec)
5999 {
6000         bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS,
6001                                     false, rx_usec);
6002         bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
6003                                        HC_INDEX_ETH_TX_CQ_CONS_COS0, false,
6004                                        tx_usec);
6005         bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
6006                                        HC_INDEX_ETH_TX_CQ_CONS_COS1, false,
6007                                        tx_usec);
6008         bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
6009                                        HC_INDEX_ETH_TX_CQ_CONS_COS2, false,
6010                                        tx_usec);
6011 }
6012
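/* Initialize the default (slowpath) status block: attention groups and
 * attention message addresses, then the SP SB data, and finally enable its
 * IGU interrupt.
 */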
6013 static void bnx2x_init_def_sb(struct bnx2x *bp)
6014 {
6015         struct host_sp_status_block *def_sb = bp->def_status_blk;
6016         dma_addr_t mapping = bp->def_status_blk_mapping;
6017         int igu_sp_sb_index;
6018         int igu_seg_id;
6019         int port = BP_PORT(bp);
6020         int func = BP_FUNC(bp);
6021         int reg_offset, reg_offset_en5;
6022         u64 section;
6023         int index;
6024         struct hc_sp_status_block_data sp_sb_data;
6025         memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
6026
6027         if (CHIP_INT_MODE_IS_BC(bp)) {
6028                 igu_sp_sb_index = DEF_SB_IGU_ID;
6029                 igu_seg_id = HC_SEG_ACCESS_DEF;
6030         } else {
6031                 igu_sp_sb_index = bp->igu_dsb_id;
6032                 igu_seg_id = IGU_SEG_ACCESS_DEF;
6033         }
6034
6035         /* ATTN */
6036         section = ((u64)mapping) + offsetof(struct host_sp_status_block,
6037                                             atten_status_block);
6038         def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
6039
6040         bp->attn_state = 0;
6041
6042         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6043                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6044         reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
6045                                  MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0);
6046         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
6047                 int sindex;
6048                 /* take care of sig[0]..sig[4] */
6049                 for (sindex = 0; sindex < 4; sindex++)
6050                         bp->attn_group[index].sig[sindex] =
6051                            REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
6052
6053                 if (!CHIP_IS_E1x(bp))
6054                         /*
6055                          * enable5 is separate from the rest of the registers,
6056                          * and therefore the address skip is 4
6057                          * and not 16 between the different groups
6058                          */
6059                         bp->attn_group[index].sig[4] = REG_RD(bp,
6060                                         reg_offset_en5 + 0x4*index);
6061                 else
6062                         bp->attn_group[index].sig[4] = 0;
6063         }
6064
6065         if (bp->common.int_block == INT_BLOCK_HC) {
6066                 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
6067                                      HC_REG_ATTN_MSG0_ADDR_L);
6068
6069                 REG_WR(bp, reg_offset, U64_LO(section));
6070                 REG_WR(bp, reg_offset + 4, U64_HI(section));
6071         } else if (!CHIP_IS_E1x(bp)) {
6072                 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
6073                 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
6074         }
6075
6076         section = ((u64)mapping) + offsetof(struct host_sp_status_block,
6077                                             sp_sb);
6078
6079         bnx2x_zero_sp_sb(bp);
6080
6081         /* PCI guarantees endianness of regpairs */
6082         sp_sb_data.state                = SB_ENABLED;
6083         sp_sb_data.host_sb_addr.lo      = U64_LO(section);
6084         sp_sb_data.host_sb_addr.hi      = U64_HI(section);
6085         sp_sb_data.igu_sb_id            = igu_sp_sb_index;
6086         sp_sb_data.igu_seg_id           = igu_seg_id;
6087         sp_sb_data.p_func.pf_id         = func;
6088         sp_sb_data.p_func.vnic_id       = BP_VN(bp);
6089         sp_sb_data.p_func.vf_id         = 0xff;
6090
6091         bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
6092
6093         bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
6094 }
6095
6096 void bnx2x_update_coalesce(struct bnx2x *bp)
6097 {
6098         int i;
6099
6100         for_each_eth_queue(bp, i)
6101                 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
6102                                          bp->tx_ticks, bp->rx_ticks);
6103 }
6104
6105 static void bnx2x_init_sp_ring(struct bnx2x *bp)
6106 {
6107         spin_lock_init(&bp->spq_lock);
6108         atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
6109
6110         bp->spq_prod_idx = 0;
6111         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
6112         bp->spq_prod_bd = bp->spq;
6113         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
6114 }
6115
6116 static void bnx2x_init_eq_ring(struct bnx2x *bp)
6117 {
6118         int i;
6119         for (i = 1; i <= NUM_EQ_PAGES; i++) {
6120                 union event_ring_elem *elem =
6121                         &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
6122
6123                 elem->next_page.addr.hi =
6124                         cpu_to_le32(U64_HI(bp->eq_mapping +
6125                                    BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
6126                 elem->next_page.addr.lo =
6127                         cpu_to_le32(U64_LO(bp->eq_mapping +
6128                                    BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
6129         }
6130         bp->eq_cons = 0;
6131         bp->eq_prod = NUM_EQ_DESC;
6132         bp->eq_cons_sb = BNX2X_EQ_INDEX;
6133         /* we want a warning message before the EQ credit actually runs out... */
6134         atomic_set(&bp->eq_spq_left,
6135                 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
6136 }
6137
6138 /* called with netif_addr_lock_bh() */
6139 static int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
6140                                unsigned long rx_mode_flags,
6141                                unsigned long rx_accept_flags,
6142                                unsigned long tx_accept_flags,
6143                                unsigned long ramrod_flags)
6144 {
6145         struct bnx2x_rx_mode_ramrod_params ramrod_param;
6146         int rc;
6147
6148         memset(&ramrod_param, 0, sizeof(ramrod_param));
6149
6150         /* Prepare ramrod parameters */
6151         ramrod_param.cid = 0;
6152         ramrod_param.cl_id = cl_id;
6153         ramrod_param.rx_mode_obj = &bp->rx_mode_obj;
6154         ramrod_param.func_id = BP_FUNC(bp);
6155
6156         ramrod_param.pstate = &bp->sp_state;
6157         ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING;
6158
6159         ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata);
6160         ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata);
6161
6162         set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
6163
6164         ramrod_param.ramrod_flags = ramrod_flags;
6165         ramrod_param.rx_mode_flags = rx_mode_flags;
6166
6167         ramrod_param.rx_accept_flags = rx_accept_flags;
6168         ramrod_param.tx_accept_flags = tx_accept_flags;
6169
6170         rc = bnx2x_config_rx_mode(bp, &ramrod_param);
6171         if (rc < 0) {
6172                 BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
6173                 return rc;
6174         }
6175
6176         return 0;
6177 }
6178
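/* Translate the driver rx_mode (none/normal/allmulti/promisc) into the rx/tx
 * accept flag bitmaps used by the rx_mode ramrod; returns -EINVAL for an
 * unknown mode.
 */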
6179 static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
6180                                    unsigned long *rx_accept_flags,
6181                                    unsigned long *tx_accept_flags)
6182 {
6183         /* Clear the flags first */
6184         *rx_accept_flags = 0;
6185         *tx_accept_flags = 0;
6186
6187         switch (rx_mode) {
6188         case BNX2X_RX_MODE_NONE:
6189                 /*
6190                  * 'drop all' supersedes any accept flags that may have been
6191                  * passed to the function.
6192                  */
6193                 break;
6194         case BNX2X_RX_MODE_NORMAL:
6195                 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
6196                 __set_bit(BNX2X_ACCEPT_MULTICAST, rx_accept_flags);
6197                 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
6198
6199                 /* internal switching mode */
6200                 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
6201                 __set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags);
6202                 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
6203
6204                 if (bp->accept_any_vlan) {
6205                         __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
6206                         __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
6207                 }
6208
6209                 break;
6210         case BNX2X_RX_MODE_ALLMULTI:
6211                 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
6212                 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
6213                 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
6214
6215                 /* internal switching mode */
6216                 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
6217                 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
6218                 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
6219
6220                 if (bp->accept_any_vlan) {
6221                         __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
6222                         __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
6223                 }
6224
6225                 break;
6226         case BNX2X_RX_MODE_PROMISC:
6227                 /* According to the definition of SI mode, an interface in
6228                  * promiscuous mode should receive both matched and unmatched
6229                  * (resolved at the port level) unicast packets.
6230                  */
6231                 __set_bit(BNX2X_ACCEPT_UNMATCHED, rx_accept_flags);
6232                 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
6233                 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
6234                 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
6235
6236                 /* internal switching mode */
6237                 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
6238                 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
6239
6240                 if (IS_MF_SI(bp))
6241                         __set_bit(BNX2X_ACCEPT_ALL_UNICAST, tx_accept_flags);
6242                 else
6243                         __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
6244
6245                 __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
6246                 __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
6247
6248                 break;
6249         default:
6250                 BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode);
6251                 return -EINVAL;
6252         }
6253
6254         return 0;
6255 }
6256
6257 /* called with netif_addr_lock_bh() */
6258 static int bnx2x_set_storm_rx_mode(struct bnx2x *bp)
6259 {
6260         unsigned long rx_mode_flags = 0, ramrod_flags = 0;
6261         unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
6262         int rc;
6263
6264         if (!NO_FCOE(bp))
6265                 /* Configure rx_mode of FCoE Queue */
6266                 __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
6267
6268         rc = bnx2x_fill_accept_flags(bp, bp->rx_mode, &rx_accept_flags,
6269                                      &tx_accept_flags);
6270         if (rc)
6271                 return rc;
6272
6273         __set_bit(RAMROD_RX, &ramrod_flags);
6274         __set_bit(RAMROD_TX, &ramrod_flags);
6275
6276         return bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags,
6277                                    rx_accept_flags, tx_accept_flags,
6278                                    ramrod_flags);
6279 }
6280
6281 static void bnx2x_init_internal_common(struct bnx2x *bp)
6282 {
6283         int i;
6284
6285         /* Zero this manually as its initialization is
6286            currently missing in the initTool */
6287         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
6288                 REG_WR(bp, BAR_USTRORM_INTMEM +
6289                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
6290         if (!CHIP_IS_E1x(bp)) {
6291                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
6292                         CHIP_INT_MODE_IS_BC(bp) ?
6293                         HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
6294         }
6295 }
6296
6297 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
6298 {
6299         switch (load_code) {
6300         case FW_MSG_CODE_DRV_LOAD_COMMON:
6301         case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
6302                 bnx2x_init_internal_common(bp);
6303                 /* no break */
6304
6305         case FW_MSG_CODE_DRV_LOAD_PORT:
6306                 /* nothing to do */
6307                 /* no break */
6308
6309         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6310                 /* internal memory per function is
6311                    initialized inside bnx2x_pf_init */
6312                 break;
6313
6314         default:
6315                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6316                 break;
6317         }
6318 }
6319
6320 static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
6321 {
6322         return fp->bp->igu_base_sb + fp->index + CNIC_SUPPORT(fp->bp);
6323 }
6324
6325 static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
6326 {
6327         return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp);
6328 }
6329
6330 static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
6331 {
6332         if (CHIP_IS_E1x(fp->bp))
6333                 return BP_L_ID(fp->bp) + fp->index;
6334         else    /* We want Client ID to be the same as IGU SB ID for 57712 */
6335                 return bnx2x_fp_igu_sb_id(fp);
6336 }
6337
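/* Initialize an Ethernet fastpath: CIDs, client/SB ids and per-CoS tx data;
 * on the PF also its status block, queue state object and classification
 * objects.
 */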
6338 static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
6339 {
6340         struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
6341         u8 cos;
6342         unsigned long q_type = 0;
6343         u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
6344         fp->rx_queue = fp_idx;
6345         fp->cid = fp_idx;
6346         fp->cl_id = bnx2x_fp_cl_id(fp);
6347         fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
6348         fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp);
6349         /* qZone id equals the FW (per path) client id */
6350         fp->cl_qzone_id  = bnx2x_fp_qzone_id(fp);
6351
6352         /* init shortcut */
6353         fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);
6354
6355         /* Setup SB indices */
6356         fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
6357
6358         /* Configure Queue State object */
6359         __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
6360         __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
6361
6362         BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS);
6363
6364         /* init tx data */
6365         for_each_cos_in_tx_queue(fp, cos) {
6366                 bnx2x_init_txdata(bp, fp->txdata_ptr[cos],
6367                                   CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp),
6368                                   FP_COS_TO_TXQ(fp, cos, bp),
6369                                   BNX2X_TX_SB_INDEX_BASE + cos, fp);
6370                 cids[cos] = fp->txdata_ptr[cos]->cid;
6371         }
6372
6373         /* nothing more for vf to do here */
6374         if (IS_VF(bp))
6375                 return;
6376
6377         bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
6378                       fp->fw_sb_id, fp->igu_sb_id);
6379         bnx2x_update_fpsb_idx(fp);
6380         bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
6381                              fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
6382                              bnx2x_sp_mapping(bp, q_rdata), q_type);
6383
6384         /* Configure classification DBs: Always enable Tx switching */
6387         bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);
6388
6389         DP(NETIF_MSG_IFUP,
6390            "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  fw_sb %d  igu_sb %d\n",
6391            fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
6392            fp->igu_sb_id);
6393 }
6394
6395 static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
6396 {
6397         int i;
6398
6399         for (i = 1; i <= NUM_TX_RINGS; i++) {
6400                 struct eth_tx_next_bd *tx_next_bd =
6401                         &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
6402
6403                 tx_next_bd->addr_hi =
6404                         cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
6405                                     BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
6406                 tx_next_bd->addr_lo =
6407                         cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
6408                                     BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
6409         }
6410
6411         *txdata->tx_cons_sb = cpu_to_le16(0);
6412
6413         SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
6414         txdata->tx_db.data.zero_fill1 = 0;
6415         txdata->tx_db.data.prod = 0;
6416
6417         txdata->tx_pkt_prod = 0;
6418         txdata->tx_pkt_cons = 0;
6419         txdata->tx_bd_prod = 0;
6420         txdata->tx_bd_cons = 0;
6421         txdata->tx_pkt = 0;
6422 }
6423
6424 static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp)
6425 {
6426         int i;
6427
6428         for_each_tx_queue_cnic(bp, i)
6429                 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]);
6430 }
6431
6432 static void bnx2x_init_tx_rings(struct bnx2x *bp)
6433 {
6434         int i;
6435         u8 cos;
6436
6437         for_each_eth_queue(bp, i)
6438                 for_each_cos_in_tx_queue(&bp->fp[i], cos)
6439                         bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
6440 }
6441
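/* Initialize the FCoE L2 fastpath; it shares the default status block and
 * supports a single CoS.
 */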
6442 static void bnx2x_init_fcoe_fp(struct bnx2x *bp)
6443 {
6444         struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
6445         unsigned long q_type = 0;
6446
6447         bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
6448         bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
6449                                                      BNX2X_FCOE_ETH_CL_ID_IDX);
6450         bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
6451         bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
6452         bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
6453         bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
6454         bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
6455                           fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
6456                           fp);
6457
6458         DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);
6459
6460         /* qZone id equals the FW (per path) client id */
6461         bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
6462         /* init shortcut */
6463         bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
6464                 bnx2x_rx_ustorm_prods_offset(fp);
6465
6466         /* Configure Queue State object */
6467         __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
6468         __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
6469
6470         /* No multi-CoS for FCoE L2 client */
6471         BUG_ON(fp->max_cos != 1);
6472
6473         bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
6474                              &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
6475                              bnx2x_sp_mapping(bp, q_rdata), q_type);
6476
6477         DP(NETIF_MSG_IFUP,
6478            "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
6479            fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
6480            fp->igu_sb_id);
6481 }
6482
6483 void bnx2x_nic_init_cnic(struct bnx2x *bp)
6484 {
6485         if (!NO_FCOE(bp))
6486                 bnx2x_init_fcoe_fp(bp);
6487
6488         bnx2x_init_sb(bp, bp->cnic_sb_mapping,
6489                       BNX2X_VF_ID_INVALID, false,
6490                       bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));
6491
6492         /* ensure status block indices were read */
6493         rmb();
6494         bnx2x_init_rx_rings_cnic(bp);
6495         bnx2x_init_tx_rings_cnic(bp);
6496
6497         /* flush all */
6498         mb();
6499         mmiowb();
6500 }
6501
6502 void bnx2x_pre_irq_nic_init(struct bnx2x *bp)
6503 {
6504         int i;
6505
6506         /* Setup NIC internals and enable interrupts */
6507         for_each_eth_queue(bp, i)
6508                 bnx2x_init_eth_fp(bp, i);
6509
6510         /* ensure status block indices were read */
6511         rmb();
6512         bnx2x_init_rx_rings(bp);
6513         bnx2x_init_tx_rings(bp);
6514
6515         if (IS_PF(bp)) {
6516                 /* Initialize MOD_ABS interrupts */
6517                 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
6518                                        bp->common.shmem_base,
6519                                        bp->common.shmem2_base, BP_PORT(bp));
6520
6521                 /* initialize the default status block and sp ring */
6522                 bnx2x_init_def_sb(bp);
6523                 bnx2x_update_dsb_idx(bp);
6524                 bnx2x_init_sp_ring(bp);
6525         } else {
6526                 bnx2x_memset_stats(bp);
6527         }
6528 }
6529
6530 void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code)
6531 {
6532         bnx2x_init_eq_ring(bp);
6533         bnx2x_init_internal(bp, load_code);
6534         bnx2x_pf_init(bp);
6535         bnx2x_stats_init(bp);
6536
6537         /* flush all before enabling interrupts */
6538         mb();
6539         mmiowb();
6540
6541         bnx2x_int_enable(bp);
6542
6543         /* Check for SPIO5 */
6544         bnx2x_attn_int_deasserted0(bp,
6545                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
6546                                    AEU_INPUTS_ATTN_BITS_SPIO5);
6547 }
6548
6549 /* gzip service functions */
6550 static int bnx2x_gunzip_init(struct bnx2x *bp)
6551 {
6552         bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
6553                                             &bp->gunzip_mapping, GFP_KERNEL);
6554         if (bp->gunzip_buf  == NULL)
6555                 goto gunzip_nomem1;
6556
6557         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
6558         if (bp->strm  == NULL)
6559                 goto gunzip_nomem2;
6560
6561         bp->strm->workspace = vmalloc(zlib_inflate_workspacesize());
6562         if (bp->strm->workspace == NULL)
6563                 goto gunzip_nomem3;
6564
6565         return 0;
6566
6567 gunzip_nomem3:
6568         kfree(bp->strm);
6569         bp->strm = NULL;
6570
6571 gunzip_nomem2:
6572         dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6573                           bp->gunzip_mapping);
6574         bp->gunzip_buf = NULL;
6575
6576 gunzip_nomem1:
6577         BNX2X_ERR("Cannot allocate firmware buffer for decompression\n");
6578         return -ENOMEM;
6579 }
6580
6581 static void bnx2x_gunzip_end(struct bnx2x *bp)
6582 {
6583         if (bp->strm) {
6584                 vfree(bp->strm->workspace);
6585                 kfree(bp->strm);
6586                 bp->strm = NULL;
6587         }
6588
6589         if (bp->gunzip_buf) {
6590                 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6591                                   bp->gunzip_mapping);
6592                 bp->gunzip_buf = NULL;
6593         }
6594 }
6595
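/* Inflate a gzipped firmware blob into bp->gunzip_buf: skip the gzip header
 * (and optional embedded file name), run zlib_inflate and leave the resulting
 * length, in dwords, in bp->gunzip_outlen.
 */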
6596 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
6597 {
6598         int n, rc;
6599
6600         /* check gzip header */
6601         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
6602                 BNX2X_ERR("Bad gzip header\n");
6603                 return -EINVAL;
6604         }
6605
6606         n = 10;
6607
6608 #define FNAME                           0x8
6609
6610         if (zbuf[3] & FNAME)
6611                 while ((zbuf[n++] != 0) && (n < len));
6612
6613         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
6614         bp->strm->avail_in = len - n;
6615         bp->strm->next_out = bp->gunzip_buf;
6616         bp->strm->avail_out = FW_BUF_SIZE;
6617
6618         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
6619         if (rc != Z_OK)
6620                 return rc;
6621
6622         rc = zlib_inflate(bp->strm, Z_FINISH);
6623         if ((rc != Z_OK) && (rc != Z_STREAM_END))
6624                 netdev_err(bp->dev, "Firmware decompression error: %s\n",
6625                            bp->strm->msg);
6626
6627         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
6628         if (bp->gunzip_outlen & 0x3)
6629                 netdev_err(bp->dev,
6630                            "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
6631                                 bp->gunzip_outlen);
6632         bp->gunzip_outlen >>= 2;
6633
6634         zlib_inflateEnd(bp->strm);
6635
6636         if (rc == Z_STREAM_END)
6637                 return 0;
6638
6639         return rc;
6640 }
6641
6642 /* nic load/unload */
6643
6644 /*
6645  * General service functions
6646  */
6647
6648 /* send a NIG loopback debug packet */
6649 static void bnx2x_lb_pckt(struct bnx2x *bp)
6650 {
6651         u32 wb_write[3];
6652
6653         /* Ethernet source and destination addresses */
6654         wb_write[0] = 0x55555555;
6655         wb_write[1] = 0x55555555;
6656         wb_write[2] = 0x20;             /* SOP */
6657         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6658
6659         /* NON-IP protocol */
6660         wb_write[0] = 0x09000000;
6661         wb_write[1] = 0x55555555;
6662         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
6663         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6664 }
6665
6666 /* Some of the internal memories are not directly readable from the
6667  * driver; to test them we send debug packets.
6668  */
6670 static int bnx2x_int_mem_test(struct bnx2x *bp)
6671 {
6672         int factor;
6673         int count, i;
6674         u32 val = 0;
6675
6676         if (CHIP_REV_IS_FPGA(bp))
6677                 factor = 120;
6678         else if (CHIP_REV_IS_EMUL(bp))
6679                 factor = 200;
6680         else
6681                 factor = 1;
6682
6683         /* Disable inputs of parser neighbor blocks */
6684         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6685         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6686         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6687         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6688
6689         /*  Write 0 to parser credits for CFC search request */
6690         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6691
6692         /* send Ethernet packet */
6693         bnx2x_lb_pckt(bp);
6694
6695         /* TODO: do we reset the NIG statistics? */
6696         /* Wait until NIG register shows 1 packet of size 0x10 */
6697         count = 1000 * factor;
6698         while (count) {
6699
6700                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6701                 val = *bnx2x_sp(bp, wb_data[0]);
6702                 if (val == 0x10)
6703                         break;
6704
6705                 usleep_range(10000, 20000);
6706                 count--;
6707         }
6708         if (val != 0x10) {
6709                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
6710                 return -1;
6711         }
6712
6713         /* Wait until PRS register shows 1 packet */
6714         count = 1000 * factor;
6715         while (count) {
6716                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6717                 if (val == 1)
6718                         break;
6719
6720                 usleep_range(10000, 20000);
6721                 count--;
6722         }
6723         if (val != 0x1) {
6724                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6725                 return -2;
6726         }
6727
6728         /* Reset and init BRB, PRS */
6729         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6730         msleep(50);
6731         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6732         msleep(50);
6733         bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6734         bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
6735
6736         DP(NETIF_MSG_HW, "part2\n");
6737
6738         /* Disable inputs of parser neighbor blocks */
6739         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6740         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6741         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6742         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6743
6744         /* Write 0 to parser credits for CFC search request */
6745         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6746
6747         /* send 10 Ethernet packets */
6748         for (i = 0; i < 10; i++)
6749                 bnx2x_lb_pckt(bp);
6750
6751         /* Wait until NIG register shows 10 + 1
6752            packets of size 11*0x10 = 0xb0 */
6753         count = 1000 * factor;
6754         while (count) {
6755
6756                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6757                 val = *bnx2x_sp(bp, wb_data[0]);
6758                 if (val == 0xb0)
6759                         break;
6760
6761                 usleep_range(10000, 20000);
6762                 count--;
6763         }
6764         if (val != 0xb0) {
6765                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
6766                 return -3;
6767         }
6768
6769         /* Wait until PRS register shows 2 packets */
6770         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6771         if (val != 2)
6772                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
6773
6774         /* Write 1 to parser credits for CFC search request */
6775         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
6776
6777         /* Give the parser time to consume the credit, then check that
6778          * the PRS packet counter shows 3 packets */
6779         msleep(10 * factor);
6780         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6781         if (val != 3)
6782                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
6783
6784         /* clear NIG EOP FIFO */
6785         for (i = 0; i < 11; i++)
6786                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
6787         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
6788         if (val != 1) {
6789                 BNX2X_ERR("clear of NIG failed\n");
6790                 return -4;
6791         }
6792
6793         /* Reset and init BRB, PRS, NIG */
6794         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6795         msleep(50);
6796         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6797         msleep(50);
6798         bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6799         bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
6800         if (!CNIC_SUPPORT(bp))
6801                 /* set NIC mode */
6802                 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6803
6804         /* Enable inputs of parser neighbor blocks */
6805         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6806         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6807         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
6808         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
6809
6810         DP(NETIF_MSG_HW, "done\n");
6811
6812         return 0; /* OK */
6813 }
6814
6815 static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
6816 {
6817         u32 val;
6818
6819         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6820         if (!CHIP_IS_E1x(bp))
6821                 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
6822         else
6823                 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
6824         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6825         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6826         /*
6827          * Mask read-length error interrupts in the BRB for the parser
6828          * (parsing unit and 'checksum and crc' unit). These errors are
6829          * legal (the PU reads a fixed length and the CAC can cause a
6830          * read-length error on truncated packets).
6831          */
6832         REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
6833         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6834         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6835         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6836         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6837         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
6838 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
6839 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
6840         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6841         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6842         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
6843 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
6844 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
6845         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6846         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6847         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6848         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
6849 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
6850 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
6851
6852         val = PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT  |
6853                 PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
6854                 PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN;
6855         if (!CHIP_IS_E1x(bp))
6856                 val |= PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
6857                         PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED;
6858         REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, val);
6859
6860         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6861         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6862         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
6863 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
6864
6865         if (!CHIP_IS_E1x(bp))
6866                 /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
6867                 REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
6868
6869         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6870         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
6871 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
6872         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bit 3,4 masked */
6873 }
6874
6875 static void bnx2x_reset_common(struct bnx2x *bp)
6876 {
6877         u32 val = 0x1400;
6878
6879         /* reset_common */
6880         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6881                0xd3ffff7f);
6882
6883         if (CHIP_IS_E3(bp)) {
6884                 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
6885                 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
6886         }
6887
6888         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val);
6889 }
6890
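/* DMAE is not usable yet at this point: mark it not ready and initialize
 * its lock; dmae_ready is raised once the DMAE block itself has been
 * initialized in the common phase.
 */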
6891 static void bnx2x_setup_dmae(struct bnx2x *bp)
6892 {
6893         bp->dmae_ready = 0;
6894         spin_lock_init(&bp->dmae_lock);
6895 }
6896
6897 static void bnx2x_init_pxp(struct bnx2x *bp)
6898 {
6899         u16 devctl;
6900         int r_order, w_order;
6901
6902         pcie_capability_read_word(bp->pdev, PCI_EXP_DEVCTL, &devctl);
6903         DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
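        /* Extract the Max_Payload_Size and, unless overridden through
         * bp->mrrs, the Max_Read_Request_Size encodings from the PCIe
         * Device Control register.
         */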
6904         w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6905         if (bp->mrrs == -1)
6906                 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6907         else {
6908                 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6909                 r_order = bp->mrrs;
6910         }
6911
6912         bnx2x_init_pxp_arb(bp, r_order, w_order);
6913 }
6914
6915 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6916 {
6917         int is_required;
6918         u32 val;
6919         int port;
6920
6921         if (BP_NOMCP(bp))
6922                 return;
6923
6924         is_required = 0;
6925         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6926               SHARED_HW_CFG_FAN_FAILURE_MASK;
6927
6928         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6929                 is_required = 1;
6930
6931         /*
6932          * The fan failure mechanism is usually related to the PHY type since
6933          * the power consumption of the board is affected by the PHY. Currently,
6934          * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6935          */
6936         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6937                 for (port = PORT_0; port < PORT_MAX; port++) {
6938                         is_required |=
6939                                 bnx2x_fan_failure_det_req(
6940                                         bp,
6941                                         bp->common.shmem_base,
6942                                         bp->common.shmem2_base,
6943                                         port);
6944                 }
6945
6946         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6947
6948         if (is_required == 0)
6949                 return;
6950
6951         /* Fan failure is indicated by SPIO 5 */
6952         bnx2x_set_spio(bp, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
6953
6954         /* set to active low mode */
6955         val = REG_RD(bp, MISC_REG_SPIO_INT);
6956         val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
6957         REG_WR(bp, MISC_REG_SPIO_INT, val);
6958
6959         /* enable interrupt to signal the IGU */
6960         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6961         val |= MISC_SPIO_SPIO5;
6962         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6963 }
6964
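/* Disable this PF: clear its function-enable in the IGU, its master-enable
 * in the PCI glue (PGLUE_B) and its weak-enable in the CFC.
 */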
6965 void bnx2x_pf_disable(struct bnx2x *bp)
6966 {
6967         u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
6968         val &= ~IGU_PF_CONF_FUNC_EN;
6969
6970         REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
6971         REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
6972         REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
6973 }
6974
6975 static void bnx2x__common_init_phy(struct bnx2x *bp)
6976 {
6977         u32 shmem_base[2], shmem2_base[2];
6978         /* Avoid common init in case MFW supports LFA */
6979         if (SHMEM2_RD(bp, size) >
6980             (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
6981                 return;
6982         shmem_base[0] =  bp->common.shmem_base;
6983         shmem2_base[0] = bp->common.shmem2_base;
6984         if (!CHIP_IS_E1x(bp)) {
6985                 shmem_base[1] =
6986                         SHMEM2_RD(bp, other_shmem_base_addr);
6987                 shmem2_base[1] =
6988                         SHMEM2_RD(bp, other_shmem2_base_addr);
6989         }
6990         bnx2x_acquire_phy_lock(bp);
6991         bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
6992                               bp->common.chip_id);
6993         bnx2x_release_phy_lock(bp);
6994 }
6995
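/* Program the PXP2 request/read swap-mode registers; bnx2x_set_endianity()
 * below passes 1 on big-endian hosts and 0 otherwise.
 */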
6996 static void bnx2x_config_endianity(struct bnx2x *bp, u32 val)
6997 {
6998         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, val);
6999         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, val);
7000         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, val);
7001         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, val);
7002         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, val);
7003
7004         /* make sure this value is 0 */
7005         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
7006
7007         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, val);
7008         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, val);
7009         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, val);
7010         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, val);
7011 }
7012
7013 static void bnx2x_set_endianity(struct bnx2x *bp)
7014 {
7015 #ifdef __BIG_ENDIAN
7016         bnx2x_config_endianity(bp, 1);
7017 #else
7018         bnx2x_config_endianity(bp, 0);
7019 #endif
7020 }
7021
7022 static void bnx2x_reset_endianity(struct bnx2x *bp)
7023 {
7024         bnx2x_config_endianity(bp, 0);
7025 }
7026
7027 /**
7028  * bnx2x_init_hw_common - initialize the HW at the COMMON phase.
7029  *
7030  * @bp:         driver handle
7031  */
7032 static int bnx2x_init_hw_common(struct bnx2x *bp)
7033 {
7034         u32 val;
7035
7036         DP(NETIF_MSG_HW, "starting common init  func %d\n", BP_ABS_FUNC(bp));
7037
7038         /*
7039          * take the RESET lock to protect undi_unload flow from accessing
7040          * registers while we're resetting the chip
7041          */
7042         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
7043
7044         bnx2x_reset_common(bp);
7045         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
7046
7047         val = 0xfffc;
7048         if (CHIP_IS_E3(bp)) {
7049                 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
7050                 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
7051         }
7052         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);
7053
7054         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
7055
7056         bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
7057
7058         if (!CHIP_IS_E1x(bp)) {
7059                 u8 abs_func_id;
7060
7061                 /*
7062                  * In 4-port or 2-port mode we need to turn off master-enable
7063                  * for everyone and then turn it back on for ourselves.
7064                  * So, regardless of multi-function mode, always disable it
7065                  * for all functions on the given path: 0,2,4,6 for path 0
7066                  * and 1,3,5,7 for path 1.
7067                  */
7068                 for (abs_func_id = BP_PATH(bp);
7069                      abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) {
7070                         if (abs_func_id == BP_ABS_FUNC(bp)) {
7071                                 REG_WR(bp,
7072                                     PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
7073                                     1);
7074                                 continue;
7075                         }
7076
7077                         bnx2x_pretend_func(bp, abs_func_id);
7078                         /* clear pf enable */
7079                         bnx2x_pf_disable(bp);
7080                         bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
7081                 }
7082         }
7083
7084         bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON);
7085         if (CHIP_IS_E1(bp)) {
7086                 /* enable HW interrupt from PXP on USDM overflow
7087                    bit 16 on INT_MASK_0 */
7088                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
7089         }
7090
7091         bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
7092         bnx2x_init_pxp(bp);
7093         bnx2x_set_endianity(bp);
7094         bnx2x_ilt_init_page_size(bp, INITOP_SET);
7095
7096         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
7097                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
7098
7099         /* let the HW do its magic ... */
7100         msleep(100);
7101         /* finish PXP init */
7102         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
7103         if (val != 1) {
7104                 BNX2X_ERR("PXP2 CFG failed\n");
7105                 return -EBUSY;
7106         }
7107         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
7108         if (val != 1) {
7109                 BNX2X_ERR("PXP2 RD_INIT failed\n");
7110                 return -EBUSY;
7111         }
7112
7113         /* Timers bug workaround E2 only. We need to set the entire ILT to
7114          * have entries with value "0" and valid bit on.
7115          * This needs to be done by the first PF that is loaded in a path
7116          * (i.e. common phase)
7117          */
7118         if (!CHIP_IS_E1x(bp)) {
7119 /* In E2 there is a bug in the timers block that can cause function 6 / 7
7120  * (i.e. vnic3) to start even if it is marked as "scan-off".
7121  * This occurs when a different function (func2,3) is being marked
7122  * as "scan-off". Real-life scenario for example: if a driver is being
7123  * load-unloaded while func6,7 are down. This will cause the timer to access
7124  * the ilt, translate to a logical address and send a request to read/write.
7125  * Since the ilt for the function that is down is not valid, this will cause
7126  * a translation error which is unrecoverable.
7127  * The Workaround is intended to make sure that when this happens nothing fatal
7128  * will occur. The workaround:
7129  *      1.  First PF driver which loads on a path will:
7130  *              a.  After taking the chip out of reset, by using pretend,
7131  *                  it will write "0" to the following registers of
7132  *                  the other vnics.
7133  *                  REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
7134  *                  REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
7135  *                  REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
7136  *                  And for itself it will write '1' to
7137  *                  PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
7138  *                  dmae-operations (writing to pram for example.)
7139  *                  note: can be done for only function 6,7 but cleaner this
7140  *                        way.
7141  *              b.  Write zero+valid to the entire ILT.
7142  *              c.  Init the first_timers_ilt_entry, last_timers_ilt_entry of
7143  *                  VNIC3 (of that port). The range allocated will be the
7144  *                  entire ILT. This is needed to prevent an ILT range error.
7145  *      2.  Any PF driver load flow:
7146  *              a.  ILT update with the physical addresses of the allocated
7147  *                  logical pages.
7148  *              b.  Wait 20msec. - note that this timeout is needed to make
7149  *                  sure there are no requests in one of the PXP internal
7150  *                  queues with "old" ILT addresses.
7151  *              c.  PF enable in the PGLC.
7152  *              d.  Clear the was_error of the PF in the PGLC. (could have
7153  *                  occurred while driver was down)
7154  *              e.  PF enable in the CFC (WEAK + STRONG)
7155  *              f.  Timers scan enable
7156  *      3.  PF driver unload flow:
7157  *              a.  Clear the Timers scan_en.
7158  *              b.  Polling for scan_on=0 for that PF.
7159  *              c.  Clear the PF enable bit in the PXP.
7160  *              d.  Clear the PF enable in the CFC (WEAK + STRONG)
7161  *              e.  Write zero+valid to all ILT entries (The valid bit must
7162  *                  stay set)
7163  *              f.  If this is VNIC 3 of a port then also init
7164  *                  first_timers_ilt_entry to zero and last_timers_ilt_entry
7165  *                  to the last entry in the ILT.
7166  *
7167  *      Notes:
7168  *      Currently the PF error in the PGLC is non-recoverable.
7169  *      In the future there will be a recovery routine for this error.
7170  *      Currently attention is masked.
7171  *      Having an MCP lock on the load/unload process does not guarantee that
7172  *      there is no Timer disable during Func6/7 enable. This is because the
7173  *      Timers scan is currently being cleared by the MCP on FLR.
7174  *      Step 2.d can be done only for PF6/7 and the driver can also check if
7175  *      there is an error before clearing it. But the flow above is simpler and
7176  *      more general.
7177  *      All ILT entries are written by zero+valid and not just PF6/7
7178  *      ILT entries since in the future the ILT entries allocation for
7179  *      PF-s might be dynamic.
7180  */
7181                 struct ilt_client_info ilt_cli;
7182                 struct bnx2x_ilt ilt;
7183                 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
7184                 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
7185
7186                 /* initialize dummy TM client */
7187                 ilt_cli.start = 0;
7188                 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
7189                 ilt_cli.client_num = ILT_CLIENT_TM;
7190
7191                 /* Step 1: set zeroes to all ilt page entries with valid bit on
7192                  * Step 2: set the timers first/last ilt entry to point
7193                  * to the entire range to prevent ILT range error for 3rd/4th
7194                  * vnic (this code assumes existence of the vnic)
7195                  *
7196                  * both steps performed by call to bnx2x_ilt_client_init_op()
7197                  * with dummy TM client
7198                  *
7199                  * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
7200                  * and its counterpart are split registers
7201                  */
7202                 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
7203                 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
7204                 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
7205
7206                 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
7207                 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
7208                 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
7209         }
7210
7211         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
7212         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
7213
7214         if (!CHIP_IS_E1x(bp)) {
7215                 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
7216                                 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
7217                 bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON);
7218
7219                 bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON);
7220
7221                 /* let the HW do its magic ... */
7222                 do {
7223                         msleep(200);
7224                         val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
7225                 } while (factor-- && (val != 1));
7226
7227                 if (val != 1) {
7228                         BNX2X_ERR("ATC_INIT failed\n");
7229                         return -EBUSY;
7230                 }
7231         }
7232
7233         bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON);
7234
7235         bnx2x_iov_init_dmae(bp);
7236
7237         /* clean the DMAE memory */
7238         bp->dmae_ready = 1;
7239         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1);
7240
7241         bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON);
7242
7243         bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON);
7244
7245         bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON);
7246
7247         bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON);
7248
7249         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
7250         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
7251         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
7252         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
7253
7254         bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON);
7255
7256         /* QM queues pointers table */
7257         bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
7258
7259         /* soft reset pulse */
7260         REG_WR(bp, QM_REG_SOFT_RESET, 1);
7261         REG_WR(bp, QM_REG_SOFT_RESET, 0);
7262
7263         if (CNIC_SUPPORT(bp))
7264                 bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
7265
7266         bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
7267
7268         if (!CHIP_REV_IS_SLOW(bp))
7269                 /* enable hw interrupt from doorbell Q */
7270                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
7271
7272         bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
7273
7274         bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
7275         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
7276
7277         if (!CHIP_IS_E1(bp))
7278                 REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan);
7279
7280         if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) {
7281                 if (IS_MF_AFEX(bp)) {
7282                         /* configure that VNTag and VLAN headers must be
7283                          * received in afex mode
7284                          */
7285                         REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 0xE);
7286                         REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, 0xA);
7287                         REG_WR(bp, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
7288                         REG_WR(bp, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
7289                         REG_WR(bp, PRS_REG_TAG_LEN_0, 0x4);
7290                 } else {
7291                         /* Bit-map indicating which L2 hdrs may appear
7292                          * after the basic Ethernet header
7293                          */
7294                         REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
7295                                bp->path_has_ovlan ? 7 : 6);
7296                 }
7297         }
7298
7299         bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON);
7300         bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON);
7301         bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON);
7302         bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON);
7303
7304         if (!CHIP_IS_E1x(bp)) {
7305                 /* reset VFC memories */
7306                 REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
7307                            VFC_MEMORIES_RST_REG_CAM_RST |
7308                            VFC_MEMORIES_RST_REG_RAM_RST);
7309                 REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
7310                            VFC_MEMORIES_RST_REG_CAM_RST |
7311                            VFC_MEMORIES_RST_REG_RAM_RST);
7312
7313                 msleep(20);
7314         }
7315
7316         bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON);
7317         bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON);
7318         bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON);
7319         bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON);
7320
7321         /* sync semi rtc */
7322         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7323                0x80000000);
7324         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7325                0x80000000);
7326
7327         bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON);
7328         bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON);
7329         bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON);
7330
7331         if (!CHIP_IS_E1x(bp)) {
7332                 if (IS_MF_AFEX(bp)) {
7333                         /* configure that VNTag and VLAN headers must be
7334                          * sent in afex mode
7335                          */
7336                         REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 0xE);
7337                         REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, 0xA);
7338                         REG_WR(bp, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
7339                         REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
7340                         REG_WR(bp, PBF_REG_TAG_LEN_0, 0x4);
7341                 } else {
7342                         REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
7343                                bp->path_has_ovlan ? 7 : 6);
7344                 }
7345         }
7346
7347         REG_WR(bp, SRC_REG_SOFT_RST, 1);
7348
7349         bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON);
7350
7351         if (CNIC_SUPPORT(bp)) {
7352                 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
7353                 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
7354                 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
7355                 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
7356                 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
7357                 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
7358                 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
7359                 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
7360                 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
7361                 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
7362         }
7363         REG_WR(bp, SRC_REG_SOFT_RST, 0);
7364
7365         if (sizeof(union cdu_context) != 1024)
7366                 /* we currently assume that a context is 1024 bytes */
7367                 dev_alert(&bp->pdev->dev,
7368                           "please adjust the size of cdu_context(%ld)\n",
7369                           (long)sizeof(union cdu_context));
7370
7371         bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON);
7372         val = (4 << 24) + (0 << 12) + 1024;
7373         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
7374
7375         bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON);
7376         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
7377         /* enable context validation interrupt from CFC */
7378         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
7379
7380         /* set the thresholds to prevent CFC/CDU race */
7381         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
7382
7383         bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON);
7384
7385         if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp))
7386                 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
7387
7388         bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON);
7389         bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON);
7390
7391         /* Reset PCIE errors for debug */
7392         REG_WR(bp, 0x2814, 0xffffffff);
7393         REG_WR(bp, 0x3820, 0xffffffff);
7394
7395         if (!CHIP_IS_E1x(bp)) {
7396                 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
7397                            (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
7398                                 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
7399                 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
7400                            (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
7401                                 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
7402                                 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
7403                 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
7404                            (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
7405                                 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
7406                                 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
7407         }
7408
7409         bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON);
7410         if (!CHIP_IS_E1(bp)) {
7411                 /* in E3 this is done in the per-port section */
7412                 if (!CHIP_IS_E3(bp))
7413                         REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
7414         }
7415         if (CHIP_IS_E1H(bp))
7416                 /* not applicable for E2 (and above ...) */
7417                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
7418
7419         if (CHIP_REV_IS_SLOW(bp))
7420                 msleep(200);
7421
7422         /* finish CFC init */
7423         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
7424         if (val != 1) {
7425                 BNX2X_ERR("CFC LL_INIT failed\n");
7426                 return -EBUSY;
7427         }
7428         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
7429         if (val != 1) {
7430                 BNX2X_ERR("CFC AC_INIT failed\n");
7431                 return -EBUSY;
7432         }
7433         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
7434         if (val != 1) {
7435                 BNX2X_ERR("CFC CAM_INIT failed\n");
7436                 return -EBUSY;
7437         }
7438         REG_WR(bp, CFC_REG_DEBUG0, 0);
7439
7440         if (CHIP_IS_E1(bp)) {
7441                 /* read the NIG statistic
7442                    to see if this is our first bring-up since power-up */
7443                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
7444                 val = *bnx2x_sp(bp, wb_data[0]);
7445
7446                 /* do internal memory self test */
7447                 if ((val == 0) && bnx2x_int_mem_test(bp)) {
7448                         BNX2X_ERR("internal mem self test failed\n");
7449                         return -EBUSY;
7450                 }
7451         }
7452
7453         bnx2x_setup_fan_failure_detection(bp);
7454
7455         /* clear PXP2 attentions */
7456         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
7457
7458         bnx2x_enable_blocks_attention(bp);
7459         bnx2x_enable_blocks_parity(bp);
7460
7461         if (!BP_NOMCP(bp)) {
7462                 if (CHIP_IS_E1x(bp))
7463                         bnx2x__common_init_phy(bp);
7464         } else
7465                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
7466
7467         if (SHMEM2_HAS(bp, netproc_fw_ver))
7468                 SHMEM2_WR(bp, netproc_fw_ver, REG_RD(bp, XSEM_REG_PRAM));
7469
7470         return 0;
7471 }
7472
7473 /**
7474  * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase.
7475  *
7476  * @bp:         driver handle
7477  */
7478 static int bnx2x_init_hw_common_chip(struct bnx2x *bp)
7479 {
7480         int rc = bnx2x_init_hw_common(bp);
7481
7482         if (rc)
7483                 return rc;
7484
7485         /* In E2 2-PORT mode, same ext phy is used for the two paths */
7486         if (!BP_NOMCP(bp))
7487                 bnx2x__common_init_phy(bp);
7488
7489         return 0;
7490 }
7491
7492 static int bnx2x_init_hw_port(struct bnx2x *bp)
7493 {
7494         int port = BP_PORT(bp);
7495         int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
7496         u32 low, high;
7497         u32 val, reg;
7498
7499         DP(NETIF_MSG_HW, "starting port init  port %d\n", port);
7500
7501         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7502
7503         bnx2x_init_block(bp, BLOCK_MISC, init_phase);
7504         bnx2x_init_block(bp, BLOCK_PXP, init_phase);
7505         bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
7506
7507         /* Timers bug workaround: the pf_master bit in pglue is disabled
7508          * in the common phase, so we need to enable it here before any DMAE
7509          * accesses are attempted. Therefore we manually add enable-master in
7510          * the port phase (it also happens in the function phase).
7511          */
7512         if (!CHIP_IS_E1x(bp))
7513                 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
7514
7515         bnx2x_init_block(bp, BLOCK_ATC, init_phase);
7516         bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
7517         bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
7518         bnx2x_init_block(bp, BLOCK_QM, init_phase);
7519
7520         bnx2x_init_block(bp, BLOCK_TCM, init_phase);
7521         bnx2x_init_block(bp, BLOCK_UCM, init_phase);
7522         bnx2x_init_block(bp, BLOCK_CCM, init_phase);
7523         bnx2x_init_block(bp, BLOCK_XCM, init_phase);
7524
7525         /* QM cid (connection) count */
7526         bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
7527
7528         if (CNIC_SUPPORT(bp)) {
7529                 bnx2x_init_block(bp, BLOCK_TM, init_phase);
7530                 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
7531                 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
7532         }
7533
7534         bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
7535
7536         bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
7537
7538         if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
7539
7540                 if (IS_MF(bp))
7541                         low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
7542                 else if (bp->dev->mtu > 4096) {
7543                         if (bp->flags & ONE_PORT_FLAG)
7544                                 low = 160;
7545                         else {
7546                                 val = bp->dev->mtu;
7547                                 /* (24*1024 + val*4)/256 */
7548                                 low = 96 + (val/64) +
7549                                                 ((val % 64) ? 1 : 0);
7550                         }
7551                 } else
7552                         low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
7553                 high = low + 56;        /* 14*1024/256 */
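                /* The BRB pause thresholds are programmed in 256-byte blocks
                 * (hence the /256 factors above); 'high' sits 14 KiB
                 * (56 blocks) above 'low'.
                 */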
7554                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
7555                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
7556         }
7557
7558         if (CHIP_MODE_IS_4_PORT(bp))
7559                 REG_WR(bp, (BP_PORT(bp) ?
7560                             BRB1_REG_MAC_GUARANTIED_1 :
7561                             BRB1_REG_MAC_GUARANTIED_0), 40);
7562
7563         bnx2x_init_block(bp, BLOCK_PRS, init_phase);
7564         if (CHIP_IS_E3B0(bp)) {
7565                 if (IS_MF_AFEX(bp)) {
7566                         /* configure headers for AFEX mode */
7567                         REG_WR(bp, BP_PORT(bp) ?
7568                                PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
7569                                PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
7570                         REG_WR(bp, BP_PORT(bp) ?
7571                                PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
7572                                PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
7573                         REG_WR(bp, BP_PORT(bp) ?
7574                                PRS_REG_MUST_HAVE_HDRS_PORT_1 :
7575                                PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
7576                 } else {
7577                         /* Ovlan exists only if we are in multi-function +
7578                          * switch-dependent mode; in switch-independent mode
7579                          * there are no ovlan headers
7580                          */
7581                         REG_WR(bp, BP_PORT(bp) ?
7582                                PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
7583                                PRS_REG_HDRS_AFTER_BASIC_PORT_0,
7584                                (bp->path_has_ovlan ? 7 : 6));
7585                 }
7586         }
7587
7588         bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
7589         bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
7590         bnx2x_init_block(bp, BLOCK_USDM, init_phase);
7591         bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
7592
7593         bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
7594         bnx2x_init_block(bp, BLOCK_USEM, init_phase);
7595         bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
7596         bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
7597
7598         bnx2x_init_block(bp, BLOCK_UPB, init_phase);
7599         bnx2x_init_block(bp, BLOCK_XPB, init_phase);
7600
7601         bnx2x_init_block(bp, BLOCK_PBF, init_phase);
7602
7603         if (CHIP_IS_E1x(bp)) {
7604                 /* configure PBF to work without PAUSE, MTU 9000 */
7605                 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
7606
7607                 /* update threshold */
7608                 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
7609                 /* update init credit */
7610                 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
7611
7612                 /* probe changes */
7613                 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
7614                 udelay(50);
7615                 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
7616         }
7617
7618         if (CNIC_SUPPORT(bp))
7619                 bnx2x_init_block(bp, BLOCK_SRC, init_phase);
7620
7621         bnx2x_init_block(bp, BLOCK_CDU, init_phase);
7622         bnx2x_init_block(bp, BLOCK_CFC, init_phase);
7623
7624         if (CHIP_IS_E1(bp)) {
7625                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7626                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7627         }
7628         bnx2x_init_block(bp, BLOCK_HC, init_phase);
7629
7630         bnx2x_init_block(bp, BLOCK_IGU, init_phase);
7631
7632         bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
7633         /* init aeu_mask_attn_func_0/1:
7634          *  - SF mode: bits 3-7 are masked. Only bits 0-2 are in use
7635          *  - MF mode: bit 3 is masked. Bits 0-2 are in use as in SF
7636          *             bits 4-7 are used for "per vn group attention" */
7637         val = IS_MF(bp) ? 0xF7 : 0x7;
7638         /* Enable DCBX attention for all but E1 */
7639         val |= CHIP_IS_E1(bp) ? 0 : 0x10;
7640         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
7641
7642         /* SCPAD_PARITY should NOT trigger close the gates */
7643         reg = port ? MISC_REG_AEU_ENABLE4_NIG_1 : MISC_REG_AEU_ENABLE4_NIG_0;
7644         REG_WR(bp, reg,
7645                REG_RD(bp, reg) &
7646                ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
7647
7648         reg = port ? MISC_REG_AEU_ENABLE4_PXP_1 : MISC_REG_AEU_ENABLE4_PXP_0;
7649         REG_WR(bp, reg,
7650                REG_RD(bp, reg) &
7651                ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
7652
7653         bnx2x_init_block(bp, BLOCK_NIG, init_phase);
7654
7655         if (!CHIP_IS_E1x(bp)) {
7656                 /* Bit-map indicating which L2 hdrs may appear after the
7657                  * basic Ethernet header
7658                  */
7659                 if (IS_MF_AFEX(bp))
7660                         REG_WR(bp, BP_PORT(bp) ?
7661                                NIG_REG_P1_HDRS_AFTER_BASIC :
7662                                NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
7663                 else
7664                         REG_WR(bp, BP_PORT(bp) ?
7665                                NIG_REG_P1_HDRS_AFTER_BASIC :
7666                                NIG_REG_P0_HDRS_AFTER_BASIC,
7667                                IS_MF_SD(bp) ? 7 : 6);
7668
7669                 if (CHIP_IS_E3(bp))
7670                         REG_WR(bp, BP_PORT(bp) ?
7671                                    NIG_REG_LLH1_MF_MODE :
7672                                    NIG_REG_LLH_MF_MODE, IS_MF(bp));
7673         }
7674         if (!CHIP_IS_E3(bp))
7675                 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
7676
7677         if (!CHIP_IS_E1(bp)) {
7678                 /* 0x2 disable mf_ov, 0x1 enable */
7679                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
7680                        (IS_MF_SD(bp) ? 0x1 : 0x2));
7681
7682                 if (!CHIP_IS_E1x(bp)) {
7683                         val = 0;
7684                         switch (bp->mf_mode) {
7685                         case MULTI_FUNCTION_SD:
7686                                 val = 1;
7687                                 break;
7688                         case MULTI_FUNCTION_SI:
7689                         case MULTI_FUNCTION_AFEX:
7690                                 val = 2;
7691                                 break;
7692                         }
7693
7694                         REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
7695                                                   NIG_REG_LLH0_CLS_TYPE), val);
7696                 }
7697                 {
7698                         REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
7699                         REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
7700                         REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
7701                 }
7702         }
7703
7704         /* If SPIO5 is set to generate interrupts, enable it for this port */
7705         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
7706         if (val & MISC_SPIO_SPIO5) {
7707                 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
7708                                        MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
7709                 val = REG_RD(bp, reg_addr);
7710                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
7711                 REG_WR(bp, reg_addr, val);
7712         }
7713
7714         return 0;
7715 }
7716
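/* Write one ILT entry: the 64-bit on-chip address for 'index' is split into
 * two 32-bit words and written to the PXP2 translation table via DMAE.
 */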
7717 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
7718 {
7719         int reg;
7720         u32 wb_write[2];
7721
7722         if (CHIP_IS_E1(bp))
7723                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
7724         else
7725                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
7726
7727         wb_write[0] = ONCHIP_ADDR1(addr);
7728         wb_write[1] = ONCHIP_ADDR2(addr);
7729         REG_WR_DMAE(bp, reg, wb_write, 2);
7730 }
7731
7732 void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf)
7733 {
7734         u32 data, ctl, cnt = 100;
7735         u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
7736         u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
7737         u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
7738         u32 sb_bit =  1 << (idu_sb_id%32);
7739         u32 func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
7740         u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
7741
7742         /* Not supported in BC mode */
7743         if (CHIP_INT_MODE_IS_BC(bp))
7744                 return;
7745
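        /* The cleanup command is issued through the GRC-mapped IGU command
         * interface: write the data word, then the control word (carrying
         * the target address and function id), then poll the per-SB ack bit.
         */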
7746         data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
7747                         << IGU_REGULAR_CLEANUP_TYPE_SHIFT)      |
7748                 IGU_REGULAR_CLEANUP_SET                         |
7749                 IGU_REGULAR_BCLEANUP;
7750
7751         ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT         |
7752               func_encode << IGU_CTRL_REG_FID_SHIFT             |
7753               IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
7754
7755         DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
7756                          data, igu_addr_data);
7757         REG_WR(bp, igu_addr_data, data);
7758         mmiowb();
7759         barrier();
7760         DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
7761                           ctl, igu_addr_ctl);
7762         REG_WR(bp, igu_addr_ctl, ctl);
7763         mmiowb();
7764         barrier();
7765
7766         /* wait for clean up to finish */
7767         while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
7768                 msleep(20);
7769
7770         if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
7771                 DP(NETIF_MSG_HW,
7772                    "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n",
7773                           idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
7774         }
7775 }
7776
7777 static void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
7778 {
7779         bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/);
7780 }
7781
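/* Zero out every ILT line that belongs to the given function. */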
7782 static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
7783 {
7784         u32 i, base = FUNC_ILT_BASE(func);
7785         for (i = base; i < base + ILT_PER_FUNC; i++)
7786                 bnx2x_ilt_wr(bp, i, 0);
7787 }
7788
7789 static void bnx2x_init_searcher(struct bnx2x *bp)
7790 {
7791         int port = BP_PORT(bp);
7792         bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
7793         /* The T1 hash-bits value determines the number of T1 entries */
7794         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
7795 }
7796
7797 static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend)
7798 {
7799         int rc;
7800         struct bnx2x_func_state_params func_params = {NULL};
7801         struct bnx2x_func_switch_update_params *switch_update_params =
7802                 &func_params.params.switch_update;
7803
7804         /* Prepare parameters for function state transitions */
7805         __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
7806         __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
7807
7808         func_params.f_obj = &bp->func_obj;
7809         func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
7810
7811         /* Function parameters */
7812         __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
7813                   &switch_update_params->changes);
7814         if (suspend)
7815                 __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND,
7816                           &switch_update_params->changes);
7817
7818         rc = bnx2x_func_state_change(bp, &func_params);
7819
7820         return rc;
7821 }
7822
7823 static int bnx2x_reset_nic_mode(struct bnx2x *bp)
7824 {
7825         int rc, i, port = BP_PORT(bp);
7826         int vlan_en = 0, mac_en[NUM_MACS];
7827
7828         /* Close input from network */
7829         if (bp->mf_mode == SINGLE_FUNCTION) {
7830                 bnx2x_set_rx_filter(&bp->link_params, 0);
7831         } else {
7832                 vlan_en = REG_RD(bp, port ? NIG_REG_LLH1_FUNC_EN :
7833                                    NIG_REG_LLH0_FUNC_EN);
7834                 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
7835                           NIG_REG_LLH0_FUNC_EN, 0);
7836                 for (i = 0; i < NUM_MACS; i++) {
7837                         mac_en[i] = REG_RD(bp, port ?
7838                                              (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7839                                               4 * i) :
7840                                              (NIG_REG_LLH0_FUNC_MEM_ENABLE +
7841                                               4 * i));
7842                         REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7843                                               4 * i) :
7844                                   (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i), 0);
7845                 }
7846         }
7847
7848         /* Close BMC to host */
7849         REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
7850                NIG_REG_P1_TX_MNG_HOST_ENABLE, 0);
7851
7852         /* Suspend Tx switching to the PF. Completion of this ramrod
7853          * further guarantees that all the packets of that PF / child
7854          * VFs in BRB were processed by the Parser, so it is safe to
7855          * change the NIC_MODE register.
7856          */
7857         rc = bnx2x_func_switch_update(bp, 1);
7858         if (rc) {
7859                 BNX2X_ERR("Can't suspend tx-switching!\n");
7860                 return rc;
7861         }
7862
7863         /* Change NIC_MODE register */
7864         REG_WR(bp, PRS_REG_NIC_MODE, 0);
7865
7866         /* Open input from network */
7867         if (bp->mf_mode == SINGLE_FUNCTION) {
7868                 bnx2x_set_rx_filter(&bp->link_params, 1);
7869         } else {
7870                 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
7871                           NIG_REG_LLH0_FUNC_EN, vlan_en);
7872                 for (i = 0; i < NUM_MACS; i++) {
7873                         REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7874                                               4 * i) :
7875                                   (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i),
7876                                   mac_en[i]);
7877                 }
7878         }
7879
7880         /* Enable BMC to host */
7881         REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
7882                NIG_REG_P1_TX_MNG_HOST_ENABLE, 1);
7883
7884         /* Resume Tx switching to the PF */
7885         rc = bnx2x_func_switch_update(bp, 0);
7886         if (rc) {
7887                 BNX2X_ERR("Can't resume tx-switching!\n");
7888                 return rc;
7889         }
7890
7891         DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
7892         return 0;
7893 }
7894
7895 int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
7896 {
7897         int rc;
7898
7899         bnx2x_ilt_init_op_cnic(bp, INITOP_SET);
7900
7901         if (CONFIGURE_NIC_MODE(bp)) {
7902                 /* Configure searcher as part of function hw init */
7903                 bnx2x_init_searcher(bp);
7904
7905                 /* Reset NIC mode */
7906                 rc = bnx2x_reset_nic_mode(bp);
7907                 if (rc)
7908                         BNX2X_ERR("Can't change NIC mode!\n");
7909                 return rc;
7910         }
7911
7912         return 0;
7913 }
7914
7915 /* A previous driver DMAE transaction may have occurred when the pre-boot stage
7916  * ended and boot began, or when the kdump kernel was loaded. Either case would
7917  * invalidate the addresses of the transaction, resulting in the was-error bit
7918  * being set in the PCI glue and causing all HW-to-host PCIe transactions to
7919  * time out. If this happened we want to clear the interrupt which detected
7920  * this from the pglueb, as well as the was-error bit.
7921  */
7922 static void bnx2x_clean_pglue_errors(struct bnx2x *bp)
7923 {
7924         if (!CHIP_IS_E1x(bp))
7925                 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
7926                        1 << BP_ABS_FUNC(bp));
7927 }
7928
7929 static int bnx2x_init_hw_func(struct bnx2x *bp)
7930 {
7931         int port = BP_PORT(bp);
7932         int func = BP_FUNC(bp);
7933         int init_phase = PHASE_PF0 + func;
7934         struct bnx2x_ilt *ilt = BP_ILT(bp);
7935         u16 cdu_ilt_start;
7936         u32 addr, val;
7937         u32 main_mem_base, main_mem_size, main_mem_prty_clr;
7938         int i, main_mem_width, rc;
7939
7940         DP(NETIF_MSG_HW, "starting func init  func %d\n", func);
7941
7942         /* FLR cleanup - hmmm */
7943         if (!CHIP_IS_E1x(bp)) {
7944                 rc = bnx2x_pf_flr_clnup(bp);
7945                 if (rc) {
7946                         bnx2x_fw_dump(bp);
7947                         return rc;
7948                 }
7949         }
7950
7951         /* set MSI reconfigure capability */
7952         if (bp->common.int_block == INT_BLOCK_HC) {
7953                 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
7954                 val = REG_RD(bp, addr);
7955                 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
7956                 REG_WR(bp, addr, val);
7957         }
7958
7959         bnx2x_init_block(bp, BLOCK_PXP, init_phase);
7960         bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
7961
7962         ilt = BP_ILT(bp);
7963         cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
7964
7965         if (IS_SRIOV(bp))
7966                 cdu_ilt_start += BNX2X_FIRST_VF_CID/ILT_PAGE_CIDS;
7967         cdu_ilt_start = bnx2x_iov_init_ilt(bp, cdu_ilt_start);
7968
7969         /* since BNX2X_FIRST_VF_CID > 0 the PF L2 CIDs precede
7970          * those of the VFs, so the start line should be reset
7971          */
7972         cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
7973         for (i = 0; i < L2_ILT_LINES(bp); i++) {
7974                 ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt;
7975                 ilt->lines[cdu_ilt_start + i].page_mapping =
7976                         bp->context[i].cxt_mapping;
7977                 ilt->lines[cdu_ilt_start + i].size = bp->context[i].size;
7978         }
7979
7980         bnx2x_ilt_init_op(bp, INITOP_SET);
7981
7982         if (!CONFIGURE_NIC_MODE(bp)) {
7983                 bnx2x_init_searcher(bp);
7984                 REG_WR(bp, PRS_REG_NIC_MODE, 0);
7985                 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
7986         } else {
7987                 /* Set NIC mode */
7988                 REG_WR(bp, PRS_REG_NIC_MODE, 1);
7989                 DP(NETIF_MSG_IFUP, "NIC MODE configured\n");
7990         }
7991
7992         if (!CHIP_IS_E1x(bp)) {
7993                 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
7994
7995                 /* Turn on a single ISR mode in IGU if driver is going to use
7996                  * INT#x or MSI
7997                  */
7998                 if (!(bp->flags & USING_MSIX_FLAG))
7999                         pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
8000                 /*
8001                  * Timers bug workaround: function init part.
8002                  * We need to wait 20 msec after initializing the ILT
8003                  * to make sure there are no requests in one of the
8004                  * PXP internal queues with "old" ILT addresses.
8005                  */
8006                 msleep(20);
8007                 /*
8008                  * Master enable - Due to WB DMAE writes performed before this
8009                  * register is re-initialized as part of the regular function
8010                  * init
8011                  */
8012                 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
8013                 /* Enable the function in IGU */
8014                 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
8015         }
8016
8017         bp->dmae_ready = 1;
8018
8019         bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
8020
8021         bnx2x_clean_pglue_errors(bp);
8022
8023         bnx2x_init_block(bp, BLOCK_ATC, init_phase);
8024         bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
8025         bnx2x_init_block(bp, BLOCK_NIG, init_phase);
8026         bnx2x_init_block(bp, BLOCK_SRC, init_phase);
8027         bnx2x_init_block(bp, BLOCK_MISC, init_phase);
8028         bnx2x_init_block(bp, BLOCK_TCM, init_phase);
8029         bnx2x_init_block(bp, BLOCK_UCM, init_phase);
8030         bnx2x_init_block(bp, BLOCK_CCM, init_phase);
8031         bnx2x_init_block(bp, BLOCK_XCM, init_phase);
8032         bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
8033         bnx2x_init_block(bp, BLOCK_USEM, init_phase);
8034         bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
8035         bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
8036
8037         if (!CHIP_IS_E1x(bp))
8038                 REG_WR(bp, QM_REG_PF_EN, 1);
8039
8040         if (!CHIP_IS_E1x(bp)) {
8041                 REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8042                 REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8043                 REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8044                 REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8045         }
8046         bnx2x_init_block(bp, BLOCK_QM, init_phase);
8047
8048         bnx2x_init_block(bp, BLOCK_TM, init_phase);
8049         bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
8050         REG_WR(bp, DORQ_REG_MODE_ACT, 1); /* no dpm */
8051
8052         bnx2x_iov_init_dq(bp);
8053
8054         bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
8055         bnx2x_init_block(bp, BLOCK_PRS, init_phase);
8056         bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
8057         bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
8058         bnx2x_init_block(bp, BLOCK_USDM, init_phase);
8059         bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
8060         bnx2x_init_block(bp, BLOCK_UPB, init_phase);
8061         bnx2x_init_block(bp, BLOCK_XPB, init_phase);
8062         bnx2x_init_block(bp, BLOCK_PBF, init_phase);
8063         if (!CHIP_IS_E1x(bp))
8064                 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
8065
8066         bnx2x_init_block(bp, BLOCK_CDU, init_phase);
8067
8068         bnx2x_init_block(bp, BLOCK_CFC, init_phase);
8069
8070         if (!CHIP_IS_E1x(bp))
8071                 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
8072
8073         if (IS_MF(bp)) {
8074                 if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) {
8075                         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
8076                         REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port * 8,
8077                                bp->mf_ov);
8078                 }
8079         }
8080
8081         bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
8082
8083         /* HC init per function */
8084         if (bp->common.int_block == INT_BLOCK_HC) {
8085                 if (CHIP_IS_E1H(bp)) {
8086                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
8087
8088                         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
8089                         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
8090                 }
8091                 bnx2x_init_block(bp, BLOCK_HC, init_phase);
8092
8093         } else {
8094                 int num_segs, sb_idx, prod_offset;
8095
8096                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
8097
8098                 if (!CHIP_IS_E1x(bp)) {
8099                         REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
8100                         REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
8101                 }
8102
8103                 bnx2x_init_block(bp, BLOCK_IGU, init_phase);
8104
8105                 if (!CHIP_IS_E1x(bp)) {
8106                         int dsb_idx = 0;
8107                         /*
8108                          * Producer memory:
8109                          * E2 mode: address 0-135 match to the mapping memory;
8110                          * 136 - PF0 default prod; 137 - PF1 default prod;
8111                          * 138 - PF2 default prod; 139 - PF3 default prod;
8112                          * 140 - PF0 attn prod;    141 - PF1 attn prod;
8113                          * 142 - PF2 attn prod;    143 - PF3 attn prod;
8114                          * 144-147 reserved.
8115                          *
8116                          * E1.5 mode - In backward compatible mode;
8117                          * for non default SB; each even line in the memory
8118                          * holds the U producer and each odd line holds
8119                          * the C producer. The first 128 producers are for
8120                          * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
8121                          * producers are for the DSB for each PF.
8122                          * Each PF has five segments: (the order inside each
8123                          * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
8124                          * 132-135 C prods; 136-139 X prods; 140-143 T prods;
8125                          * 144-147 attn prods;
8126                          */
8127                         /* non-default-status-blocks */
8128                         num_segs = CHIP_INT_MODE_IS_BC(bp) ?
8129                                 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
8130                         for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
8131                                 prod_offset = (bp->igu_base_sb + sb_idx) *
8132                                         num_segs;
8133
8134                                 for (i = 0; i < num_segs; i++) {
8135                                         addr = IGU_REG_PROD_CONS_MEMORY +
8136                                                         (prod_offset + i) * 4;
8137                                         REG_WR(bp, addr, 0);
8138                                 }
8139                                 /* send consumer update with value 0 */
8140                                 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
8141                                              USTORM_ID, 0, IGU_INT_NOP, 1);
8142                                 bnx2x_igu_clear_sb(bp,
8143                                                    bp->igu_base_sb + sb_idx);
8144                         }
8145
8146                         /* default-status-blocks */
8147                         num_segs = CHIP_INT_MODE_IS_BC(bp) ?
8148                                 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
8149
8150                         if (CHIP_MODE_IS_4_PORT(bp))
8151                                 dsb_idx = BP_FUNC(bp);
8152                         else
8153                                 dsb_idx = BP_VN(bp);
8154
8155                         prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
8156                                        IGU_BC_BASE_DSB_PROD + dsb_idx :
8157                                        IGU_NORM_BASE_DSB_PROD + dsb_idx);
8158
8159                         /*
8160                          * igu prods come in chunks of E1HVN_MAX (4),
8161                          * regardless of the current chip mode
8162                          */
8163                         for (i = 0; i < (num_segs * E1HVN_MAX);
8164                              i += E1HVN_MAX) {
8165                                 addr = IGU_REG_PROD_CONS_MEMORY +
8166                                                         (prod_offset + i)*4;
8167                                 REG_WR(bp, addr, 0);
8168                         }
8169                         /* send consumer update with 0 */
8170                         if (CHIP_INT_MODE_IS_BC(bp)) {
8171                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8172                                              USTORM_ID, 0, IGU_INT_NOP, 1);
8173                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8174                                              CSTORM_ID, 0, IGU_INT_NOP, 1);
8175                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8176                                              XSTORM_ID, 0, IGU_INT_NOP, 1);
8177                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8178                                              TSTORM_ID, 0, IGU_INT_NOP, 1);
8179                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8180                                              ATTENTION_ID, 0, IGU_INT_NOP, 1);
8181                         } else {
8182                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8183                                              USTORM_ID, 0, IGU_INT_NOP, 1);
8184                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8185                                              ATTENTION_ID, 0, IGU_INT_NOP, 1);
8186                         }
8187                         bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
8188
8189                         /* !!! These should become driver const once
8190                            rf-tool supports split-68 const */
8191                         REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
8192                         REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
8193                         REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
8194                         REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
8195                         REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
8196                         REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
8197                 }
8198         }
8199
8200         /* Reset PCIE errors for debug */
8201         REG_WR(bp, 0x2114, 0xffffffff);
8202         REG_WR(bp, 0x2120, 0xffffffff);
8203
8204         if (CHIP_IS_E1x(bp)) {
8205                 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
8206                 main_mem_base = HC_REG_MAIN_MEMORY +
8207                                 BP_PORT(bp) * (main_mem_size * 4);
8208                 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
8209                 main_mem_width = 8;
8210
8211                 val = REG_RD(bp, main_mem_prty_clr);
8212                 if (val)
8213                         DP(NETIF_MSG_HW,
8214                            "Hmmm... Parity errors in HC block during function init (0x%x)!\n",
8215                            val);
8216
8217                 /* Clear "false" parity errors in MSI-X table */
8218                 for (i = main_mem_base;
8219                      i < main_mem_base + main_mem_size * 4;
8220                      i += main_mem_width) {
8221                         bnx2x_read_dmae(bp, i, main_mem_width / 4);
8222                         bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
8223                                          i, main_mem_width / 4);
8224                 }
8225                 /* Clear HC parity attention */
8226                 REG_RD(bp, main_mem_prty_clr);
8227         }
8228
8229 #ifdef BNX2X_STOP_ON_ERROR
8230         /* Enable STORMs SP logging */
8231         REG_WR8(bp, BAR_USTRORM_INTMEM +
8232                USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8233         REG_WR8(bp, BAR_TSTRORM_INTMEM +
8234                TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8235         REG_WR8(bp, BAR_CSTRORM_INTMEM +
8236                CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8237         REG_WR8(bp, BAR_XSTRORM_INTMEM +
8238                XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8239 #endif
8240
8241         bnx2x_phy_probe(&bp->link_params);
8242
8243         return 0;
8244 }
8245
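     /* Release the CNIC-related DMA memory: the CNIC ILT lines, the CNIC
      * status block (E2 or E1x layout) and the searcher T2 table.
      */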
8246 void bnx2x_free_mem_cnic(struct bnx2x *bp)
8247 {
8248         bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE);
8249
8250         if (!CHIP_IS_E1x(bp))
8251                 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
8252                                sizeof(struct host_hc_status_block_e2));
8253         else
8254                 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
8255                                sizeof(struct host_hc_status_block_e1x));
8256
8257         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
8258 }
8259
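     /* Release the PF slow-path DMA memory allocated by bnx2x_alloc_mem():
      * FW statistics, default status block, slowpath area, CDU context
      * pages, ILT lines, SPQ, EQ ring, searcher T2 table and SR-IOV memory.
      * VFs only own the FW statistics buffer, so they return early.
      */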
8260 void bnx2x_free_mem(struct bnx2x *bp)
8261 {
8262         int i;
8263
8264         BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
8265                        bp->fw_stats_data_sz + bp->fw_stats_req_sz);
8266
8267         if (IS_VF(bp))
8268                 return;
8269
8270         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
8271                        sizeof(struct host_sp_status_block));
8272
8273         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
8274                        sizeof(struct bnx2x_slowpath));
8275
8276         for (i = 0; i < L2_ILT_LINES(bp); i++)
8277                 BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping,
8278                                bp->context[i].size);
8279         bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
8280
8281         BNX2X_FREE(bp->ilt->lines);
8282
8283         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
8284
8285         BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
8286                        BCM_PAGE_SIZE * NUM_EQ_PAGES);
8287
8288         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
8289
8290         bnx2x_iov_free_mem(bp);
8291 }
8292
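     /* Allocate the CNIC-related DMA memory: the CNIC status block (E2 or
      * E1x layout), the searcher T2 table if it was not allocated earlier
      * (NIC mode) and the CNIC ILT lines. On failure everything is freed
      * and -ENOMEM is returned.
      */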
8293 int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
8294 {
8295         if (!CHIP_IS_E1x(bp)) {
8296                 /* size = the status block + ramrod buffers */
8297                 bp->cnic_sb.e2_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
8298                                                     sizeof(struct host_hc_status_block_e2));
8299                 if (!bp->cnic_sb.e2_sb)
8300                         goto alloc_mem_err;
8301         } else {
8302                 bp->cnic_sb.e1x_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
8303                                                      sizeof(struct host_hc_status_block_e1x));
8304                 if (!bp->cnic_sb.e1x_sb)
8305                         goto alloc_mem_err;
8306         }
8307
8308         if (CONFIGURE_NIC_MODE(bp) && !bp->t2) {
8309                 /* allocate searcher T2 table, as it wasn't allocated before */
8310                 bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
8311                 if (!bp->t2)
8312                         goto alloc_mem_err;
8313         }
8314
8315         /* write address to which L5 should insert its values */
8316         bp->cnic_eth_dev.addr_drv_info_to_mcp =
8317                 &bp->slowpath->drv_info_to_mcp;
8318
8319         if (bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_ALLOC))
8320                 goto alloc_mem_err;
8321
8322         return 0;
8323
8324 alloc_mem_err:
8325         bnx2x_free_mem_cnic(bp);
8326         BNX2X_ERR("Can't allocate memory\n");
8327         return -ENOMEM;
8328 }
8329
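     /* Allocate the PF slow-path DMA memory: the searcher T2 table (when
      * not in NIC mode), default status block, slowpath area, CDU context
      * pages, ILT lines, SR-IOV memory, slow path ring and event queue ring.
      */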
8330 int bnx2x_alloc_mem(struct bnx2x *bp)
8331 {
8332         int i, allocated, context_size;
8333
8334         if (!CONFIGURE_NIC_MODE(bp) && !bp->t2) {
8335                 /* allocate searcher T2 table */
8336                 bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
8337                 if (!bp->t2)
8338                         goto alloc_mem_err;
8339         }
8340
8341         bp->def_status_blk = BNX2X_PCI_ALLOC(&bp->def_status_blk_mapping,
8342                                              sizeof(struct host_sp_status_block));
8343         if (!bp->def_status_blk)
8344                 goto alloc_mem_err;
8345
8346         bp->slowpath = BNX2X_PCI_ALLOC(&bp->slowpath_mapping,
8347                                        sizeof(struct bnx2x_slowpath));
8348         if (!bp->slowpath)
8349                 goto alloc_mem_err;
8350
8351         /* Allocate memory for CDU context:
8352          * This memory is allocated separately and not in the generic ILT
8353          * functions because CDU differs in a few aspects:
8354          * 1. There are multiple entities allocating memory for context -
8355          * 'regular' driver, CNIC and SRIOV driver. Each separately controls
8356          * its own ILT lines.
8357          * 2. Since CDU page-size is not a single 4KB page (which is the case
8358          * for the other ILT clients), to be efficient we want to support
8359          * allocation of sub-page-size in the last entry.
8360          * 3. Context pointers are used by the driver to pass to FW / update
8361          * the context (for the other ILT clients the pointers are used just to
8362          * free the memory during unload).
8363          */
8364         context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
8365
8366         for (i = 0, allocated = 0; allocated < context_size; i++) {
8367                 bp->context[i].size = min(CDU_ILT_PAGE_SZ,
8368                                           (context_size - allocated));
8369                 bp->context[i].vcxt = BNX2X_PCI_ALLOC(&bp->context[i].cxt_mapping,
8370                                                       bp->context[i].size);
8371                 if (!bp->context[i].vcxt)
8372                         goto alloc_mem_err;
8373                 allocated += bp->context[i].size;
8374         }
8375         bp->ilt->lines = kcalloc(ILT_MAX_LINES, sizeof(struct ilt_line),
8376                                  GFP_KERNEL);
8377         if (!bp->ilt->lines)
8378                 goto alloc_mem_err;
8379
8380         if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
8381                 goto alloc_mem_err;
8382
8383         if (bnx2x_iov_alloc_mem(bp))
8384                 goto alloc_mem_err;
8385
8386         /* Slow path ring */
8387         bp->spq = BNX2X_PCI_ALLOC(&bp->spq_mapping, BCM_PAGE_SIZE);
8388         if (!bp->spq)
8389                 goto alloc_mem_err;
8390
8391         /* EQ */
8392         bp->eq_ring = BNX2X_PCI_ALLOC(&bp->eq_mapping,
8393                                       BCM_PAGE_SIZE * NUM_EQ_PAGES);
8394         if (!bp->eq_ring)
8395                 goto alloc_mem_err;
8396
8397         return 0;
8398
8399 alloc_mem_err:
8400         bnx2x_free_mem(bp);
8401         BNX2X_ERR("Can't allocate memory\n");
8402         return -ENOMEM;
8403 }
8404
8405 /*
8406  * Init service functions
8407  */
8408
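     /* Add or delete a single MAC address through the given vlan_mac object.
      * Adding a MAC that is already configured (-EEXIST) is not treated as
      * an error.
      */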
8409 int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
8410                       struct bnx2x_vlan_mac_obj *obj, bool set,
8411                       int mac_type, unsigned long *ramrod_flags)
8412 {
8413         int rc;
8414         struct bnx2x_vlan_mac_ramrod_params ramrod_param;
8415
8416         memset(&ramrod_param, 0, sizeof(ramrod_param));
8417
8418         /* Fill general parameters */
8419         ramrod_param.vlan_mac_obj = obj;
8420         ramrod_param.ramrod_flags = *ramrod_flags;
8421
8422         /* Fill a user request section if needed */
8423         if (!test_bit(RAMROD_CONT, ramrod_flags)) {
8424                 memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
8425
8426                 __set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
8427
8428                 /* Set the command: ADD or DEL */
8429                 if (set)
8430                         ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
8431                 else
8432                         ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
8433         }
8434
8435         rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
8436
8437         if (rc == -EEXIST) {
8438                 DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
8439                 /* do not treat adding same MAC as error */
8440                 rc = 0;
8441         } else if (rc < 0)
8442                 BNX2X_ERR("%s MAC failed\n", (set ? "Set" : "Del"));
8443
8444         return rc;
8445 }
8446
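     /* Add or delete a single VLAN through the given vlan_mac object. As
      * with MACs, adding an already configured VLAN (-EEXIST) is not
      * treated as an error.
      */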
8447 int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
8448                        struct bnx2x_vlan_mac_obj *obj, bool set,
8449                        unsigned long *ramrod_flags)
8450 {
8451         int rc;
8452         struct bnx2x_vlan_mac_ramrod_params ramrod_param;
8453
8454         memset(&ramrod_param, 0, sizeof(ramrod_param));
8455
8456         /* Fill general parameters */
8457         ramrod_param.vlan_mac_obj = obj;
8458         ramrod_param.ramrod_flags = *ramrod_flags;
8459
8460         /* Fill a user request section if needed */
8461         if (!test_bit(RAMROD_CONT, ramrod_flags)) {
8462                 ramrod_param.user_req.u.vlan.vlan = vlan;
8463                 /* Set the command: ADD or DEL */
8464                 if (set)
8465                         ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
8466                 else
8467                         ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
8468         }
8469
8470         rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
8471
8472         if (rc == -EEXIST) {
8473                 /* Do not treat adding same vlan as error. */
8474                 DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
8475                 rc = 0;
8476         } else if (rc < 0) {
8477                 BNX2X_ERR("%s VLAN failed\n", (set ? "Set" : "Del"));
8478         }
8479
8480         return rc;
8481 }
8482
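     /* Delete all MAC addresses of the given type from a MAC object,
      * optionally waiting for the ramrod completion.
      */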
8483 int bnx2x_del_all_macs(struct bnx2x *bp,
8484                        struct bnx2x_vlan_mac_obj *mac_obj,
8485                        int mac_type, bool wait_for_comp)
8486 {
8487         int rc;
8488         unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
8489
8490         /* Wait for completion of the requested command */
8491         if (wait_for_comp)
8492                 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
8493
8494         /* Set the mac type of addresses we want to clear */
8495         __set_bit(mac_type, &vlan_mac_flags);
8496
8497         rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags);
8498         if (rc < 0)
8499                 BNX2X_ERR("Failed to delete MACs: %d\n", rc);
8500
8501         return rc;
8502 }
8503
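     /* Add or remove the primary Ethernet MAC: directly via a ramrod for a
      * PF, or through the PF/VF channel for a VF.
      */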
8504 int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
8505 {
8506         if (IS_PF(bp)) {
8507                 unsigned long ramrod_flags = 0;
8508
8509                 DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
8510                 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
8511                 return bnx2x_set_mac_one(bp, bp->dev->dev_addr,
8512                                          &bp->sp_objs->mac_obj, set,
8513                                          BNX2X_ETH_MAC, &ramrod_flags);
8514         } else { /* vf */
8515                 return bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr,
8516                                              bp->fp->index, set);
8517         }
8518 }
8519
8520 int bnx2x_setup_leading(struct bnx2x *bp)
8521 {
8522         if (IS_PF(bp))
8523                 return bnx2x_setup_queue(bp, &bp->fp[0], true);
8524         else /* VF */
8525                 return bnx2x_vfpf_setup_q(bp, &bp->fp[0], true);
8526 }
8527
8528 /**
8529  * bnx2x_set_int_mode - configure interrupt mode
8530  *
8531  * @bp:         driver handle
8532  *
8533  * In case of MSI-X interrupt mode it will also try to enable MSI-X.
8534  */
8535 int bnx2x_set_int_mode(struct bnx2x *bp)
8536 {
8537         int rc = 0;
8538
8539         if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX) {
8540                 BNX2X_ERR("VF not loaded since interrupt mode not msix\n");
8541                 return -EINVAL;
8542         }
8543
8544         switch (int_mode) {
8545         case BNX2X_INT_MODE_MSIX:
8546                 /* attempt to enable msix */
8547                 rc = bnx2x_enable_msix(bp);
8548
8549                 /* msix attained */
8550                 if (!rc)
8551                         return 0;
8552
8553                 /* vfs use only msix */
8554                 if (rc && IS_VF(bp))
8555                         return rc;
8556
8557                 /* failed to enable multiple MSI-X */
8558                 BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
8559                                bp->num_queues,
8560                                1 + bp->num_cnic_queues);
8561
8562                 /* falling through... */
8563         case BNX2X_INT_MODE_MSI:
8564                 bnx2x_enable_msi(bp);
8565
8566                 /* falling through... */
8567         case BNX2X_INT_MODE_INTX:
8568                 bp->num_ethernet_queues = 1;
8569                 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
8570                 BNX2X_DEV_INFO("set number of queues to 1\n");
8571                 break;
8572         default:
8573                 BNX2X_DEV_INFO("unknown value in int_mode module parameter\n");
8574                 return -EINVAL;
8575         }
8576         return 0;
8577 }
8578
8579 /* must be called prior to any HW initializations */
8580 static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
8581 {
8582         if (IS_SRIOV(bp))
8583                 return (BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)/ILT_PAGE_CIDS;
8584         return L2_ILT_LINES(bp);
8585 }
8586
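     /* Describe the per-function ILT layout: assign start/end lines and
      * page sizes for the CDU and QM clients and, when CNIC is supported,
      * for the SRC and TM clients as well.
      */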
8587 void bnx2x_ilt_set_info(struct bnx2x *bp)
8588 {
8589         struct ilt_client_info *ilt_client;
8590         struct bnx2x_ilt *ilt = BP_ILT(bp);
8591         u16 line = 0;
8592
8593         ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
8594         DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
8595
8596         /* CDU */
8597         ilt_client = &ilt->clients[ILT_CLIENT_CDU];
8598         ilt_client->client_num = ILT_CLIENT_CDU;
8599         ilt_client->page_size = CDU_ILT_PAGE_SZ;
8600         ilt_client->flags = ILT_CLIENT_SKIP_MEM;
8601         ilt_client->start = line;
8602         line += bnx2x_cid_ilt_lines(bp);
8603
8604         if (CNIC_SUPPORT(bp))
8605                 line += CNIC_ILT_LINES;
8606         ilt_client->end = line - 1;
8607
8608         DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8609            ilt_client->start,
8610            ilt_client->end,
8611            ilt_client->page_size,
8612            ilt_client->flags,
8613            ilog2(ilt_client->page_size >> 12));
8614
8615         /* QM */
8616         if (QM_INIT(bp->qm_cid_count)) {
8617                 ilt_client = &ilt->clients[ILT_CLIENT_QM];
8618                 ilt_client->client_num = ILT_CLIENT_QM;
8619                 ilt_client->page_size = QM_ILT_PAGE_SZ;
8620                 ilt_client->flags = 0;
8621                 ilt_client->start = line;
8622
8623                 /* 4 bytes for each cid */
8624                 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
8625                                                          QM_ILT_PAGE_SZ);
8626
8627                 ilt_client->end = line - 1;
8628
8629                 DP(NETIF_MSG_IFUP,
8630                    "ilt client[QM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8631                    ilt_client->start,
8632                    ilt_client->end,
8633                    ilt_client->page_size,
8634                    ilt_client->flags,
8635                    ilog2(ilt_client->page_size >> 12));
8636         }
8637
8638         if (CNIC_SUPPORT(bp)) {
8639                 /* SRC */
8640                 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
8641                 ilt_client->client_num = ILT_CLIENT_SRC;
8642                 ilt_client->page_size = SRC_ILT_PAGE_SZ;
8643                 ilt_client->flags = 0;
8644                 ilt_client->start = line;
8645                 line += SRC_ILT_LINES;
8646                 ilt_client->end = line - 1;
8647
8648                 DP(NETIF_MSG_IFUP,
8649                    "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8650                    ilt_client->start,
8651                    ilt_client->end,
8652                    ilt_client->page_size,
8653                    ilt_client->flags,
8654                    ilog2(ilt_client->page_size >> 12));
8655
8656                 /* TM */
8657                 ilt_client = &ilt->clients[ILT_CLIENT_TM];
8658                 ilt_client->client_num = ILT_CLIENT_TM;
8659                 ilt_client->page_size = TM_ILT_PAGE_SZ;
8660                 ilt_client->flags = 0;
8661                 ilt_client->start = line;
8662                 line += TM_ILT_LINES;
8663                 ilt_client->end = line - 1;
8664
8665                 DP(NETIF_MSG_IFUP,
8666                    "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8667                    ilt_client->start,
8668                    ilt_client->end,
8669                    ilt_client->page_size,
8670                    ilt_client->flags,
8671                    ilog2(ilt_client->page_size >> 12));
8672         }
8673
8674         BUG_ON(line > ILT_MAX_LINES);
8675 }
8676
8677 /**
8678  * bnx2x_pf_q_prep_init - prepare INIT transition parameters
8679  *
8680  * @bp:                 driver handle
8681  * @fp:                 pointer to fastpath
8682  * @init_params:        pointer to parameters structure
8683  *
8684  * parameters configured:
8685  *      - HC configuration
8686  *      - Queue's CDU context
8687  */
8688 static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
8689         struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params)
8690 {
8691         u8 cos;
8692         int cxt_index, cxt_offset;
8693
8694         /* FCoE Queue uses Default SB, thus has no HC capabilities */
8695         if (!IS_FCOE_FP(fp)) {
8696                 __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
8697                 __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags);
8698
8699                 /* If HC is supported, enable host coalescing in the transition
8700                  * to INIT state.
8701                  */
8702                 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags);
8703                 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags);
8704
8705                 /* HC rate */
8706                 init_params->rx.hc_rate = bp->rx_ticks ?
8707                         (1000000 / bp->rx_ticks) : 0;
8708                 init_params->tx.hc_rate = bp->tx_ticks ?
8709                         (1000000 / bp->tx_ticks) : 0;
8710
8711                 /* FW SB ID */
8712                 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id =
8713                         fp->fw_sb_id;
8714
8715                 /*
8716                  * CQ index among the SB indices: FCoE clients use the default
8717                  * SB, therefore it's different.
8718                  */
8719                 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
8720                 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
8721         }
8722
8723         /* set maximum number of COSs supported by this queue */
8724         init_params->max_cos = fp->max_cos;
8725
8726         DP(NETIF_MSG_IFUP, "fp: %d setting queue params max cos to: %d\n",
8727             fp->index, init_params->max_cos);
8728
8729         /* set the context pointers queue object */
8730         for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
8731                 cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS;
8732                 cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index *
8733                                 ILT_PAGE_CIDS);
8734                 init_params->cxts[cos] =
8735                         &bp->context[cxt_index].vcxt[cxt_offset].eth;
8736         }
8737 }
8738
8739 static int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
8740                         struct bnx2x_queue_state_params *q_params,
8741                         struct bnx2x_queue_setup_tx_only_params *tx_only_params,
8742                         int tx_index, bool leading)
8743 {
8744         memset(tx_only_params, 0, sizeof(*tx_only_params));
8745
8746         /* Set the command */
8747         q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
8748
8749         /* Set tx-only QUEUE flags: don't zero statistics */
8750         tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false);
8751
8752         /* choose the index of the cid to send the slow path on */
8753         tx_only_params->cid_index = tx_index;
8754
8755         /* Set general TX_ONLY_SETUP parameters */
8756         bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index);
8757
8758         /* Set Tx TX_ONLY_SETUP parameters */
8759         bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index);
8760
8761         DP(NETIF_MSG_IFUP,
8762            "preparing to send tx-only ramrod for connection: cos %d, primary cid %d, cid %d, client id %d, sp-client id %d, flags %lx\n",
8763            tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX],
8764            q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id,
8765            tx_only_params->gen_params.spcl_id, tx_only_params->flags);
8766
8767         /* send the ramrod */
8768         return bnx2x_queue_state_change(bp, q_params);
8769 }
8770
8771 /**
8772  * bnx2x_setup_queue - setup queue
8773  *
8774  * @bp:         driver handle
8775  * @fp:         pointer to fastpath
8776  * @leading:    is leading
8777  *
8778  * This function performs 2 steps in a Queue state machine:
8779  *      1) RESET->INIT 2) INIT->SETUP
8780  */
8781
8782 int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
8783                        bool leading)
8784 {
8785         struct bnx2x_queue_state_params q_params = {NULL};
8786         struct bnx2x_queue_setup_params *setup_params =
8787                                                 &q_params.params.setup;
8788         struct bnx2x_queue_setup_tx_only_params *tx_only_params =
8789                                                 &q_params.params.tx_only;
8790         int rc;
8791         u8 tx_index;
8792
8793         DP(NETIF_MSG_IFUP, "setting up queue %d\n", fp->index);
8794
8795         /* reset IGU state; skip the FCoE L2 queue */
8796         if (!IS_FCOE_FP(fp))
8797                 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
8798                              IGU_INT_ENABLE, 0);
8799
8800         q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
8801         /* We want to wait for completion in this context */
8802         __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
8803
8804         /* Prepare the INIT parameters */
8805         bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init);
8806
8807         /* Set the command */
8808         q_params.cmd = BNX2X_Q_CMD_INIT;
8809
8810         /* Change the state to INIT */
8811         rc = bnx2x_queue_state_change(bp, &q_params);
8812         if (rc) {
8813                 BNX2X_ERR("Queue(%d) INIT failed\n", fp->index);
8814                 return rc;
8815         }
8816
8817         DP(NETIF_MSG_IFUP, "init complete\n");
8818
8819         /* Now move the Queue to the SETUP state... */
8820         memset(setup_params, 0, sizeof(*setup_params));
8821
8822         /* Set QUEUE flags */
8823         setup_params->flags = bnx2x_get_q_flags(bp, fp, leading);
8824
8825         /* Set general SETUP parameters */
8826         bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params,
8827                                 FIRST_TX_COS_INDEX);
8828
8829         bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params,
8830                             &setup_params->rxq_params);
8831
8832         bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params,
8833                            FIRST_TX_COS_INDEX);
8834
8835         /* Set the command */
8836         q_params.cmd = BNX2X_Q_CMD_SETUP;
8837
8838         if (IS_FCOE_FP(fp))
8839                 bp->fcoe_init = true;
8840
8841         /* Change the state to SETUP */
8842         rc = bnx2x_queue_state_change(bp, &q_params);
8843         if (rc) {
8844                 BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index);
8845                 return rc;
8846         }
8847
8848         /* loop through the relevant tx-only indices */
8849         for (tx_index = FIRST_TX_ONLY_COS_INDEX;
8850               tx_index < fp->max_cos;
8851               tx_index++) {
8852
8853                 /* prepare and send tx-only ramrod */
8854                 rc = bnx2x_setup_tx_only(bp, fp, &q_params,
8855                                           tx_only_params, tx_index, leading);
8856                 if (rc) {
8857                         BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n",
8858                                   fp->index, tx_index);
8859                         return rc;
8860                 }
8861         }
8862
8863         return rc;
8864 }
8865
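     /* Tear down a single queue: halt, terminate and CFC-delete the tx-only
      * connections first, then the primary connection.
      */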
8866 static int bnx2x_stop_queue(struct bnx2x *bp, int index)
8867 {
8868         struct bnx2x_fastpath *fp = &bp->fp[index];
8869         struct bnx2x_fp_txdata *txdata;
8870         struct bnx2x_queue_state_params q_params = {NULL};
8871         int rc, tx_index;
8872
8873         DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid);
8874
8875         q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
8876         /* We want to wait for completion in this context */
8877         __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
8878
8879         /* close tx-only connections */
8880         for (tx_index = FIRST_TX_ONLY_COS_INDEX;
8881              tx_index < fp->max_cos;
8882              tx_index++){
8883
8884                 /* ascertain this is a normal queue */
8885                 txdata = fp->txdata_ptr[tx_index];
8886
8887                 DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n",
8888                                                         txdata->txq_index);
8889
8890                 /* send halt terminate on tx-only connection */
8891                 q_params.cmd = BNX2X_Q_CMD_TERMINATE;
8892                 memset(&q_params.params.terminate, 0,
8893                        sizeof(q_params.params.terminate));
8894                 q_params.params.terminate.cid_index = tx_index;
8895
8896                 rc = bnx2x_queue_state_change(bp, &q_params);
8897                 if (rc)
8898                         return rc;
8899
8900                 /* delete the cfc entry for the tx-only connection */
8901                 q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
8902                 memset(&q_params.params.cfc_del, 0,
8903                        sizeof(q_params.params.cfc_del));
8904                 q_params.params.cfc_del.cid_index = tx_index;
8905                 rc = bnx2x_queue_state_change(bp, &q_params);
8906                 if (rc)
8907                         return rc;
8908         }
8909         /* Stop the primary connection: */
8910         /* ...halt the connection */
8911         q_params.cmd = BNX2X_Q_CMD_HALT;
8912         rc = bnx2x_queue_state_change(bp, &q_params);
8913         if (rc)
8914                 return rc;
8915
8916         /* ...terminate the connection */
8917         q_params.cmd = BNX2X_Q_CMD_TERMINATE;
8918         memset(&q_params.params.terminate, 0,
8919                sizeof(q_params.params.terminate));
8920         q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
8921         rc = bnx2x_queue_state_change(bp, &q_params);
8922         if (rc)
8923                 return rc;
8924         /* ...delete cfc entry */
8925         q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
8926         memset(&q_params.params.cfc_del, 0,
8927                sizeof(q_params.params.cfc_del));
8928         q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
8929         return bnx2x_queue_state_change(bp, &q_params);
8930 }
8931
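     /* Disable the function on unload: mark it disabled in the FW STORMs,
      * disable its status blocks, mask HC/IGU attention edges, stop the
      * CNIC timer scan, clear the function's ILT and, on non-E1x chips,
      * disable the PF.
      */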
8932 static void bnx2x_reset_func(struct bnx2x *bp)
8933 {
8934         int port = BP_PORT(bp);
8935         int func = BP_FUNC(bp);
8936         int i;
8937
8938         /* Disable the function in the FW */
8939         REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
8940         REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
8941         REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
8942         REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
8943
8944         /* FP SBs */
8945         for_each_eth_queue(bp, i) {
8946                 struct bnx2x_fastpath *fp = &bp->fp[i];
8947                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8948                            CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
8949                            SB_DISABLED);
8950         }
8951
8952         if (CNIC_LOADED(bp))
8953                 /* CNIC SB */
8954                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8955                         CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET
8956                         (bnx2x_cnic_fw_sb_id(bp)), SB_DISABLED);
8957
8958         /* SP SB */
8959         REG_WR8(bp, BAR_CSTRORM_INTMEM +
8960                 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
8961                 SB_DISABLED);
8962
8963         for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
8964                 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
8965                        0);
8966
8967         /* Configure IGU */
8968         if (bp->common.int_block == INT_BLOCK_HC) {
8969                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
8970                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
8971         } else {
8972                 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
8973                 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
8974         }
8975
8976         if (CNIC_LOADED(bp)) {
8977                 /* Disable Timer scan */
8978                 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
8979                 /*
8980                  * Wait for at least 10ms and up to 2 seconds for the timers
8981                  * scan to complete
8982                  */
8983                 for (i = 0; i < 200; i++) {
8984                         usleep_range(10000, 20000);
8985                         if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
8986                                 break;
8987                 }
8988         }
8989         /* Clear ILT */
8990         bnx2x_clear_func_ilt(bp, func);
8991
8992         /* Timers bug workaround for E2: if this is vnic-3,
8993          * we need to set the entire ILT range for the timers client.
8994          */
8995         if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) {
8996                 struct ilt_client_info ilt_cli;
8997                 /* use dummy TM client */
8998                 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
8999                 ilt_cli.start = 0;
9000                 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
9001                 ilt_cli.client_num = ILT_CLIENT_TM;
9002
9003                 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
9004         }
9005
9006         /* this assumes that reset_port() is called before reset_func() */
9007         if (!CHIP_IS_E1x(bp))
9008                 bnx2x_pf_disable(bp);
9009
9010         bp->dmae_ready = 0;
9011 }
9012
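     /* Reset the port on unload: bring the physical link down, mask NIG
      * interrupts, stop forwarding Rx traffic to the BRB and check that the
      * BRB has drained.
      */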
9013 static void bnx2x_reset_port(struct bnx2x *bp)
9014 {
9015         int port = BP_PORT(bp);
9016         u32 val;
9017
9018         /* Reset physical Link */
9019         bnx2x__link_reset(bp);
9020
9021         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
9022
9023         /* Do not receive packets into the BRB */
9024         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
9025         /* Do not direct received packets that are not for the MCP to the BRB */
9026         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
9027                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
9028
9029         /* Configure AEU */
9030         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
9031
9032         msleep(100);
9033         /* Check for BRB port occupancy */
9034         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
9035         if (val)
9036                 DP(NETIF_MSG_IFDOWN,
9037                    "BRB1 is not empty  %d blocks are occupied\n", val);
9038
9039         /* TODO: Close Doorbell port? */
9040 }
9041
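     /* Send the HW_RESET function-state ramrod, using the given reset code
      * as the load phase.
      */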
9042 static int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
9043 {
9044         struct bnx2x_func_state_params func_params = {NULL};
9045
9046         /* Prepare parameters for function state transitions */
9047         __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
9048
9049         func_params.f_obj = &bp->func_obj;
9050         func_params.cmd = BNX2X_F_CMD_HW_RESET;
9051
9052         func_params.params.hw_init.load_phase = load_code;
9053
9054         return bnx2x_func_state_change(bp, &func_params);
9055 }
9056
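     /* Send the FUNC_STOP ramrod. If it fails and we are not in debug mode,
      * retry as a driver-only ('dry') transaction so that a later HW_RESET
      * can still go through.
      */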
9057 static int bnx2x_func_stop(struct bnx2x *bp)
9058 {
9059         struct bnx2x_func_state_params func_params = {NULL};
9060         int rc;
9061
9062         /* Prepare parameters for function state transitions */
9063         __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
9064         func_params.f_obj = &bp->func_obj;
9065         func_params.cmd = BNX2X_F_CMD_STOP;
9066
9067         /*
9068          * Try to stop the function the 'good way'. If that fails (in case
9069          * of a parity error during bnx2x_chip_cleanup()) and we are
9070          * not in a debug mode, perform a state transaction in order to
9071          * enable a further HW_RESET transaction.
9072          */
9073         rc = bnx2x_func_state_change(bp, &func_params);
9074         if (rc) {
9075 #ifdef BNX2X_STOP_ON_ERROR
9076                 return rc;
9077 #else
9078                 BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry transaction\n");
9079                 __set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
9080                 return bnx2x_func_state_change(bp, &func_params);
9081 #endif
9082         }
9083
9084         return 0;
9085 }
9086
9087 /**
9088  * bnx2x_send_unload_req - request unload mode from the MCP.
9089  *
9090  * @bp:                 driver handle
9091  * @unload_mode:        requested function's unload mode
9092  *
9093  * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
9094  */
9095 u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
9096 {
9097         u32 reset_code = 0;
9098         int port = BP_PORT(bp);
9099
9100         /* Select the UNLOAD request mode */
9101         if (unload_mode == UNLOAD_NORMAL)
9102                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9103
9104         else if (bp->flags & NO_WOL_FLAG)
9105                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
9106
9107         else if (bp->wol) {
9108                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
9109                 u8 *mac_addr = bp->dev->dev_addr;
9110                 struct pci_dev *pdev = bp->pdev;
9111                 u32 val;
9112                 u16 pmc;
9113
9114                 /* The mac address is written to entries 1-4 to
9115                  * preserve entry 0 which is used by the PMF
9116                  */
9117                 u8 entry = (BP_VN(bp) + 1)*8;
9118
9119                 val = (mac_addr[0] << 8) | mac_addr[1];
9120                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
9121
9122                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
9123                       (mac_addr[4] << 8) | mac_addr[5];
9124                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
9125
9126                 /* Enable the PME and clear the status */
9127                 pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmc);
9128                 pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
9129                 pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmc);
9130
9131                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
9132
9133         } else
9134                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9135
9136         /* Send the request to the MCP */
9137         if (!BP_NOMCP(bp))
9138                 reset_code = bnx2x_fw_command(bp, reset_code, 0);
9139         else {
9140                 int path = BP_PATH(bp);
9141
9142                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d]      %d, %d, %d\n",
9143                    path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
9144                    bnx2x_load_count[path][2]);
9145                 bnx2x_load_count[path][0]--;
9146                 bnx2x_load_count[path][1 + port]--;
9147                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d]  %d, %d, %d\n",
9148                    path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
9149                    bnx2x_load_count[path][2]);
9150                 if (bnx2x_load_count[path][0] == 0)
9151                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
9152                 else if (bnx2x_load_count[path][1 + port] == 0)
9153                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
9154                 else
9155                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
9156         }
9157
9158         return reset_code;
9159 }
9160
9161 /**
9162  * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
9163  *
9164  * @bp:         driver handle
9165  * @keep_link:          true iff link should be kept up
9166  */
9167 void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link)
9168 {
9169         u32 reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
9170
9171         /* Report UNLOAD_DONE to MCP */
9172         if (!BP_NOMCP(bp))
9173                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
9174 }
9175
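     /* On the PMF, wait for the function state machine to settle back to
      * STARTED (a Tx disable/enable transaction may still be in flight);
      * force the transition if it does not complete in time.
      */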
9176 static int bnx2x_func_wait_started(struct bnx2x *bp)
9177 {
9178         int tout = 50;
9179         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
9180
9181         if (!bp->port.pmf)
9182                 return 0;
9183
9184         /*
9185          * (assumption: No Attention from MCP at this stage)
9186          * PMF probably in the middle of TX disable/enable transaction
9187          * 1. Sync ISR for default SB
9188          * 2. Sync SP queue - this guarantees us that attention handling started
9189          * 3. Wait until the TX disable/enable transaction completes
9190          *
9191          * 1+2 guarantee that if a DCBx attention was scheduled it already changed
9192          * the pending bit of the transaction from STARTED-->TX_STOPPED; if we
9193          * already received completion for the transaction, the state is TX_STOPPED.
9194          * State will return to STARTED after completion of TX_STOPPED-->STARTED
9195          * transaction.
9196          */
9197
9198         /* make sure default SB ISR is done */
9199         if (msix)
9200                 synchronize_irq(bp->msix_table[0].vector);
9201         else
9202                 synchronize_irq(bp->pdev->irq);
9203
9204         flush_workqueue(bnx2x_wq);
9205         flush_workqueue(bnx2x_iov_wq);
9206
9207         while (bnx2x_func_get_state(bp, &bp->func_obj) !=
9208                                 BNX2X_F_STATE_STARTED && tout--)
9209                 msleep(20);
9210
9211         if (bnx2x_func_get_state(bp, &bp->func_obj) !=
9212                                                 BNX2X_F_STATE_STARTED) {
9213 #ifdef BNX2X_STOP_ON_ERROR
9214                 BNX2X_ERR("Wrong function state\n");
9215                 return -EBUSY;
9216 #else
9217                 /*
9218                  * Failed to complete the transaction in a "good way"
9219                  * Force both transactions with CLR bit
9220                  */
9221                 struct bnx2x_func_state_params func_params = {NULL};
9222
9223                 DP(NETIF_MSG_IFDOWN,
9224                    "Hmmm... Unexpected function state! Forcing STARTED-->TX_STOPPED-->STARTED\n");
9225
9226                 func_params.f_obj = &bp->func_obj;
9227                 __set_bit(RAMROD_DRV_CLR_ONLY,
9228                                         &func_params.ramrod_flags);
9229
9230                 /* STARTED-->TX_STOPPED */
9231                 func_params.cmd = BNX2X_F_CMD_TX_STOP;
9232                 bnx2x_func_state_change(bp, &func_params);
9233
9234                 /* TX_STOPPED-->STARTED */
9235                 func_params.cmd = BNX2X_F_CMD_TX_START;
9236                 return bnx2x_func_state_change(bp, &func_params);
9237 #endif
9238         }
9239
9240         return 0;
9241 }
9242
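     /* Disable PTP in the NIG for this port: stop forwarding PTP packets to
      * the host, reset the event detection rules and turn the feature off.
      */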
9243 static void bnx2x_disable_ptp(struct bnx2x *bp)
9244 {
9245         int port = BP_PORT(bp);
9246
9247         /* Disable sending PTP packets to host */
9248         REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
9249                NIG_REG_P0_LLH_PTP_TO_HOST, 0x0);
9250
9251         /* Reset PTP event detection rules */
9252         REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
9253                NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF);
9254         REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
9255                NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF);
9256         REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
9257                NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF);
9258         REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
9259                NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF);
9260
9261         /* Disable the PTP feature */
9262         REG_WR(bp, port ? NIG_REG_P1_PTP_EN :
9263                NIG_REG_P0_PTP_EN, 0x0);
9264 }
9265
9266 /* Called during unload, to stop PTP-related activity */
9267 static void bnx2x_stop_ptp(struct bnx2x *bp)
9268 {
9269         /* Cancel PTP work queue. Should be done after the Tx queues are
9270          * drained to prevent additional scheduling.
9271          */
9272         cancel_work_sync(&bp->ptp_task);
9273
9274         if (bp->ptp_tx_skb) {
9275                 dev_kfree_skb_any(bp->ptp_tx_skb);
9276                 bp->ptp_tx_skb = NULL;
9277         }
9278
9279         /* Disable PTP in HW */
9280         bnx2x_disable_ptp(bp);
9281
9282         DP(BNX2X_MSG_PTP, "PTP stop ended successfully\n");
9283 }
9284
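     /* Full chip cleanup on unload: drain the Tx queues, remove MAC and
      * multicast filters, stop Rx, stop all queues, stop the function and
      * PTP, disable interrupts and NAPI, free IRQs, reset the HW and report
      * UNLOAD_DONE to the MCP.
      */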
9285 void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
9286 {
9287         int port = BP_PORT(bp);
9288         int i, rc = 0;
9289         u8 cos;
9290         struct bnx2x_mcast_ramrod_params rparam = {NULL};
9291         u32 reset_code;
9292
9293         /* Wait until tx fastpath tasks complete */
9294         for_each_tx_queue(bp, i) {
9295                 struct bnx2x_fastpath *fp = &bp->fp[i];
9296
9297                 for_each_cos_in_tx_queue(fp, cos)
9298                         rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
9299 #ifdef BNX2X_STOP_ON_ERROR
9300                 if (rc)
9301                         return;
9302 #endif
9303         }
9304
9305         /* Give HW time to discard old tx messages */
9306         usleep_range(1000, 2000);
9307
9308         /* Clean all ETH MACs */
9309         rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC,
9310                                 false);
9311         if (rc < 0)
9312                 BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);
9313
9314         /* Clean up UC list  */
9315         rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC,
9316                                 true);
9317         if (rc < 0)
9318                 BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
9319                           rc);
9320
9321         /* Disable LLH */
9322         if (!CHIP_IS_E1(bp))
9323                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
9324
9325         /* Set "drop all" (stop Rx).
9326          * We need to take a netif_addr_lock() here in order to prevent
9327          * a race between the completion code and this code.
9328          */
9329         netif_addr_lock_bh(bp->dev);
9330         /* Schedule the rx_mode command */
9331         if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
9332                 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
9333         else
9334                 bnx2x_set_storm_rx_mode(bp);
9335
9336         /* Cleanup multicast configuration */
9337         rparam.mcast_obj = &bp->mcast_obj;
9338         rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
9339         if (rc < 0)
9340                 BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc);
9341
9342         netif_addr_unlock_bh(bp->dev);
9343
9344         bnx2x_iov_chip_cleanup(bp);
9345
9346         /*
9347          * Send the UNLOAD_REQUEST to the MCP. This will return whether
9348          * this function should perform a FUNC, PORT or COMMON HW
9349          * reset.
9350          */
9351         reset_code = bnx2x_send_unload_req(bp, unload_mode);
9352
9353         /*
9354          * (assumption: No Attention from MCP at this stage)
9355          * PMF probably in the middle of TX disable/enable transaction
9356          */
9357         rc = bnx2x_func_wait_started(bp);
9358         if (rc) {
9359                 BNX2X_ERR("bnx2x_func_wait_started failed\n");
9360 #ifdef BNX2X_STOP_ON_ERROR
9361                 return;
9362 #endif
9363         }
9364
9365         /* Close multi and leading connections
9366          * Completions for ramrods are collected in a synchronous way
9367          */
9368         for_each_eth_queue(bp, i)
9369                 if (bnx2x_stop_queue(bp, i))
9370 #ifdef BNX2X_STOP_ON_ERROR
9371                         return;
9372 #else
9373                         goto unload_error;
9374 #endif
9375
9376         if (CNIC_LOADED(bp)) {
9377                 for_each_cnic_queue(bp, i)
9378                         if (bnx2x_stop_queue(bp, i))
9379 #ifdef BNX2X_STOP_ON_ERROR
9380                                 return;
9381 #else
9382                                 goto unload_error;
9383 #endif
9384         }
9385
9386         /* If SP settings didn't get completed so far - something
9387          * very wrong has happened.
9388          */
9389         if (!bnx2x_wait_sp_comp(bp, ~0x0UL))
9390                 BNX2X_ERR("Hmmm... Common slow path ramrods got stuck!\n");
9391
9392 #ifndef BNX2X_STOP_ON_ERROR
9393 unload_error:
9394 #endif
9395         rc = bnx2x_func_stop(bp);
9396         if (rc) {
9397                 BNX2X_ERR("Function stop failed!\n");
9398 #ifdef BNX2X_STOP_ON_ERROR
9399                 return;
9400 #endif
9401         }
9402
9403         /* stop_ptp should be called after the Tx queues are drained to prevent
9404          * scheduling to the cancelled PTP work queue. It should also be called
9405          * after the function stop ramrod is sent, since as part of this ramrod
9406          * the FW accesses the PTP registers.
9407          */
9408         if (bp->flags & PTP_SUPPORTED)
9409                 bnx2x_stop_ptp(bp);
9410
9411         /* Disable HW interrupts, NAPI */
9412         bnx2x_netif_stop(bp, 1);
9413         /* Delete all NAPI objects */
9414         bnx2x_del_all_napi(bp);
9415         if (CNIC_LOADED(bp))
9416                 bnx2x_del_all_napi_cnic(bp);
9417
9418         /* Release IRQs */
9419         bnx2x_free_irq(bp);
9420
9421         /* Reset the chip */
9422         rc = bnx2x_reset_hw(bp, reset_code);
9423         if (rc)
9424                 BNX2X_ERR("HW_RESET failed\n");
9425
9426         /* Report UNLOAD_DONE to MCP */
9427         bnx2x_send_unload_done(bp, keep_link);
9428 }
9429
9430 void bnx2x_disable_close_the_gate(struct bnx2x *bp)
9431 {
9432         u32 val;
9433
9434         DP(NETIF_MSG_IFDOWN, "Disabling \"close the gates\"\n");
9435
9436         if (CHIP_IS_E1(bp)) {
9437                 int port = BP_PORT(bp);
9438                 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
9439                         MISC_REG_AEU_MASK_ATTN_FUNC_0;
9440
9441                 val = REG_RD(bp, addr);
9442                 val &= ~(0x300);
9443                 REG_WR(bp, addr, val);
9444         } else {
9445                 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
9446                 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
9447                          MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
9448                 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
9449         }
9450 }
9451
9452 /* Close gates #2, #3 and #4: */
9453 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
9454 {
9455         u32 val;
9456
9457         /* Gates #2 and #4a are closed/opened for "not E1" only */
9458         if (!CHIP_IS_E1(bp)) {
9459                 /* #4 */
9460                 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
9461                 /* #2 */
9462                 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
9463         }
9464
9465         /* #3 */
9466         if (CHIP_IS_E1x(bp)) {
9467                 /* Prevent interrupts from HC on both ports */
9468                 val = REG_RD(bp, HC_REG_CONFIG_1);
9469                 REG_WR(bp, HC_REG_CONFIG_1,
9470                        (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
9471                        (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
9472
9473                 val = REG_RD(bp, HC_REG_CONFIG_0);
9474                 REG_WR(bp, HC_REG_CONFIG_0,
9475                        (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
9476                        (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
9477         } else {
9478                 /* Prevent incoming interrupts in IGU */
9479                 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
9480
9481                 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION,
9482                        (!close) ?
9483                        (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
9484                        (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
9485         }
9486
9487         DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n",
9488                 close ? "closing" : "opening");
9489         mmiowb();
9490 }
9491
9492 #define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */
9493
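/**
 * bnx2x_clp_reset_prep - save and then set the `magic' bit.
 *
 * @bp:         driver handle
 * @magic_val:  where the old value of the `magic' bit is stored
 */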
9494 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
9495 {
9496         /* Do some magic... */
9497         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
9498         *magic_val = val & SHARED_MF_CLP_MAGIC;
9499         MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
9500 }
9501
9502 /**
9503  * bnx2x_clp_reset_done - restore the value of the `magic' bit.
9504  *
9505  * @bp:         driver handle
9506  * @magic_val:  old value of the `magic' bit.
9507  */
9508 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
9509 {
9510         /* Restore the `magic' bit value... */
9511         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
9512         MF_CFG_WR(bp, shared_mf_config.clp_mb,
9513                 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
9514 }
9515
9516 /**
9517  * bnx2x_reset_mcp_prep - prepare for MCP reset.
9518  *
9519  * @bp:         driver handle
9520  * @magic_val:  old value of 'magic' bit.
9521  *
9522  * Takes care of CLP configurations.
9523  */
9524 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
9525 {
9526         u32 shmem;
9527         u32 validity_offset;
9528
9529         DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "Starting\n");
9530
9531         /* Set `magic' bit in order to save MF config */
9532         if (!CHIP_IS_E1(bp))
9533                 bnx2x_clp_reset_prep(bp, magic_val);
9534
9535         /* Get shmem offset */
9536         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9537         validity_offset =
9538                 offsetof(struct shmem_region, validity_map[BP_PORT(bp)]);
9539
9540         /* Clear validity map flags */
9541         if (shmem > 0)
9542                 REG_WR(bp, shmem + validity_offset, 0);
9543 }
9544
9545 #define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
9546 #define MCP_ONE_TIMEOUT  100    /* 100 ms */
9547
9548 /**
9549  * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT
9550  *
9551  * @bp: driver handle
9552  */
9553 static void bnx2x_mcp_wait_one(struct bnx2x *bp)
9554 {
9555         /* special handling for emulation and FPGA,
9556          * wait 10 times longer */
9557         if (CHIP_REV_IS_SLOW(bp))
9558                 msleep(MCP_ONE_TIMEOUT*10);
9559         else
9560                 msleep(MCP_ONE_TIMEOUT);
9561 }
9562
9563 /*
9564  * initializes bp->common.shmem_base and waits for validity signature to appear
9565  */
9566 static int bnx2x_init_shmem(struct bnx2x *bp)
9567 {
9568         int cnt = 0;
9569         u32 val = 0;
9570
9571         do {
9572                 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9573                 if (bp->common.shmem_base) {
9574                         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9575                         if (val & SHR_MEM_VALIDITY_MB)
9576                                 return 0;
9577                 }
9578
9579                 bnx2x_mcp_wait_one(bp);
9580
9581         } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
9582
9583         BNX2X_ERR("BAD MCP validity signature\n");
9584
9585         return -ENODEV;
9586 }
9587
9588 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
9589 {
9590         int rc = bnx2x_init_shmem(bp);
9591
9592         /* Restore the `magic' bit value */
9593         if (!CHIP_IS_E1(bp))
9594                 bnx2x_clp_reset_done(bp, magic_val);
9595
9596         return rc;
9597 }
9598
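/* Prepare PXP for the chip reset: clear PXP2_REG_RD_START_INIT and
 * PXP2_REG_RQ_RBC_DONE (nothing to do on E1).
 */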
9599 static void bnx2x_pxp_prep(struct bnx2x *bp)
9600 {
9601         if (!CHIP_IS_E1(bp)) {
9602                 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
9603                 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
9604                 mmiowb();
9605         }
9606 }
9607
9608 /*
9609  * Reset the whole chip except for:
9610  *      - PCIE core
9611  *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
9612  *              one reset bit)
9613  *      - IGU
9614  *      - MISC (including AEU)
9615  *      - GRC
9616  *      - RBCN, RBCP
9617  */
9618 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global)
9619 {
9620         u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
9621         u32 global_bits2, stay_reset2;
9622
9623         /*
9624          * Bits that have to be set in reset_mask2 if we want to reset 'global'
9625          * (per chip) blocks.
9626          */
9627         global_bits2 =
9628                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
9629                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
9630
9631         /* Don't reset the following blocks.
9632          * Important: per-port blocks (such as EMAC, BMAC, UMAC) can't be
9633          *            reset, as in a 4-port device they might still be owned
9634          *            by the MCP (there is only one leader per path).
9635          */
9636         not_reset_mask1 =
9637                 MISC_REGISTERS_RESET_REG_1_RST_HC |
9638                 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
9639                 MISC_REGISTERS_RESET_REG_1_RST_PXP;
9640
9641         not_reset_mask2 =
9642                 MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
9643                 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
9644                 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
9645                 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
9646                 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
9647                 MISC_REGISTERS_RESET_REG_2_RST_GRC  |
9648                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
9649                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
9650                 MISC_REGISTERS_RESET_REG_2_RST_ATC |
9651                 MISC_REGISTERS_RESET_REG_2_PGLC |
9652                 MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
9653                 MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
9654                 MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
9655                 MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
9656                 MISC_REGISTERS_RESET_REG_2_UMAC0 |
9657                 MISC_REGISTERS_RESET_REG_2_UMAC1;
9658
9659         /*
9660          * Keep the following blocks in reset:
9661          *  - all xxMACs are handled by the bnx2x_link code.
9662          */
9663         stay_reset2 =
9664                 MISC_REGISTERS_RESET_REG_2_XMAC |
9665                 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
9666
9667         /* Full reset masks according to the chip */
9668         reset_mask1 = 0xffffffff;
9669
9670         if (CHIP_IS_E1(bp))
9671                 reset_mask2 = 0xffff;
9672         else if (CHIP_IS_E1H(bp))
9673                 reset_mask2 = 0x1ffff;
9674         else if (CHIP_IS_E2(bp))
9675                 reset_mask2 = 0xfffff;
9676         else /* CHIP_IS_E3 */
9677                 reset_mask2 = 0x3ffffff;
9678
9679         /* Don't reset global blocks unless we need to */
9680         if (!global)
9681                 reset_mask2 &= ~global_bits2;
9682
9683         /*
9684          * In case of attention in the QM, we need to reset PXP
9685          * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
9686          * because otherwise QM reset would release 'close the gates' shortly
9687          * before resetting the PXP, then the PSWRQ would send a write
9688          * request to PGLUE. Then when PXP is reset, PGLUE would try to
9689          * read the payload data from PSWWR, but PSWWR would not
9690          * respond. The write queue in PGLUE would get stuck, dmae commands
9691          * would not return. Therefore it's important to reset the second
9692          * reset register (containing the
9693          * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
9694          * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
9695          * bit).
9696          */
9697         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
9698                reset_mask2 & (~not_reset_mask2));
9699
9700         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
9701                reset_mask1 & (~not_reset_mask1));
9702
9703         barrier();
9704         mmiowb();
9705
9706         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
9707                reset_mask2 & (~stay_reset2));
9708
9709         barrier();
9710         mmiowb();
9711
9712         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
9713         mmiowb();
9714 }
9715
9716 /**
9717  * bnx2x_er_poll_igu_vq - poll for the IGU pending writes bit to clear.
9718  *
9719  * @bp: driver handle
9720  *
9721  * The pending writes bit should get cleared in no more than 1s.
9722  * Returns 0 if the bit gets cleared within that time, -EBUSY
9723  * otherwise.
9724  */
9725 static int bnx2x_er_poll_igu_vq(struct bnx2x *bp)
9726 {
9727         int cnt = 1000;
9728         u32 pend_bits = 0;
9729
9730         do {
9731                 pend_bits  = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS);
9732
9733                 if (pend_bits == 0)
9734                         break;
9735
9736                 usleep_range(1000, 2000);
9737         } while (cnt-- > 0);
9738
9739         if (cnt <= 0) {
9740                 BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n",
9741                           pend_bits);
9742                 return -EBUSY;
9743         }
9744
9745         return 0;
9746 }
9747
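/* "Process kill" flow: wait for the PXP Tetris buffer to empty, close
 * gates #2, #3 and #4, reset the chip (and, for a global reset, the MCP),
 * then reopen the gates. Returns 0 on success.
 */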
9748 static int bnx2x_process_kill(struct bnx2x *bp, bool global)
9749 {
9750         int cnt = 1000;
9751         u32 val = 0;
9752         u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
9753         u32 tags_63_32 = 0;
9754
9755         /* Empty the Tetris buffer, wait for 1s */
9756         do {
9757                 sr_cnt  = REG_RD(bp, PXP2_REG_RD_SR_CNT);
9758                 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
9759                 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
9760                 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
9761                 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
9762                 if (CHIP_IS_E3(bp))
9763                         tags_63_32 = REG_RD(bp, PGLUE_B_REG_TAGS_63_32);
9764
9765                 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
9766                     ((port_is_idle_0 & 0x1) == 0x1) &&
9767                     ((port_is_idle_1 & 0x1) == 0x1) &&
9768                     (pgl_exp_rom2 == 0xffffffff) &&
9769                     (!CHIP_IS_E3(bp) || (tags_63_32 == 0xffffffff)))
9770                         break;
9771                 usleep_range(1000, 2000);
9772         } while (cnt-- > 0);
9773
9774         if (cnt <= 0) {
9775                 BNX2X_ERR("Tetris buffer didn't get empty or there are still outstanding read requests after 1s!\n");
9776                 BNX2X_ERR("sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
9777                           sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
9778                           pgl_exp_rom2);
9779                 return -EAGAIN;
9780         }
9781
9782         barrier();
9783
9784         /* Close gates #2, #3 and #4 */
9785         bnx2x_set_234_gates(bp, true);
9786
9787         /* Poll for IGU VQs for 57712 and newer chips */
9788         if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp))
9789                 return -EAGAIN;
9790
9791         /* TBD: Indicate that "process kill" is in progress to MCP */
9792
9793         /* Clear "unprepared" bit */
9794         REG_WR(bp, MISC_REG_UNPREPARED, 0);
9795         barrier();
9796
9797         /* Make sure all is written to the chip before the reset */
9798         mmiowb();
9799
9800         /* Wait for 1ms to empty GLUE and PCI-E core queues,
9801          * PSWHST, GRC and PSWRD Tetris buffer.
9802          */
9803         usleep_range(1000, 2000);
9804
9805         /* Prepare to chip reset: */
9806         /* MCP */
9807         if (global)
9808                 bnx2x_reset_mcp_prep(bp, &val);
9809
9810         /* PXP */
9811         bnx2x_pxp_prep(bp);
9812         barrier();
9813
9814         /* reset the chip */
9815         bnx2x_process_kill_chip_reset(bp, global);
9816         barrier();
9817
9818         /* clear errors in PGB */
9819         if (!CHIP_IS_E1x(bp))
9820                 REG_WR(bp, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);
9821
9822         /* Recover after reset: */
9823         /* MCP */
9824         if (global && bnx2x_reset_mcp_comp(bp, val))
9825                 return -EAGAIN;
9826
9827         /* TBD: Add resetting the NO_MCP mode DB here */
9828
9829         /* Open the gates #2, #3 and #4 */
9830         bnx2x_set_234_gates(bp, false);
9831
9832         /* TBD: IGU/AEU preparation - bring the AEU/IGU back to a
9833          * reset state, re-enable attentions. */
9834
9835         return 0;
9836 }
9837
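/* Leader flow of the parity recovery: optionally perform a "fake" load
 * handshake with the MCP, run the process-kill chip reset and, on success,
 * clear the RESET_IN_PROGRESS/RESET_GLOBAL indications before releasing
 * the leader lock.
 */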
9838 static int bnx2x_leader_reset(struct bnx2x *bp)
9839 {
9840         int rc = 0;
9841         bool global = bnx2x_reset_is_global(bp);
9842         u32 load_code;
9843
9844         /* if not going to reset MCP - load "fake" driver to reset HW while
9845          * driver is owner of the HW
9846          */
9847         if (!global && !BP_NOMCP(bp)) {
9848                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
9849                                              DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
9850                 if (!load_code) {
9851                         BNX2X_ERR("MCP response failure, aborting\n");
9852                         rc = -EAGAIN;
9853                         goto exit_leader_reset;
9854                 }
9855                 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
9856                     (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
9857                         BNX2X_ERR("MCP unexpected resp, aborting\n");
9858                         rc = -EAGAIN;
9859                         goto exit_leader_reset2;
9860                 }
9861                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9862                 if (!load_code) {
9863                         BNX2X_ERR("MCP response failure, aborting\n");
9864                         rc = -EAGAIN;
9865                         goto exit_leader_reset2;
9866                 }
9867         }
9868
9869         /* Try to recover after the failure */
9870         if (bnx2x_process_kill(bp, global)) {
9871                 BNX2X_ERR("Something bad has happened on engine %d! Aii!\n",
9872                           BP_PATH(bp));
9873                 rc = -EAGAIN;
9874                 goto exit_leader_reset2;
9875         }
9876
9877         /*
9878          * Clear RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
9879          * state.
9880          */
9881         bnx2x_set_reset_done(bp);
9882         if (global)
9883                 bnx2x_clear_reset_global(bp);
9884
9885 exit_leader_reset2:
9886         /* unload "fake driver" if it was loaded */
9887         if (!global && !BP_NOMCP(bp)) {
9888                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
9889                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
9890         }
9891 exit_leader_reset:
9892         bp->is_leader = 0;
9893         bnx2x_release_leader_lock(bp);
9894         smp_mb();
9895         return rc;
9896 }
9897
9898 static void bnx2x_recovery_failed(struct bnx2x *bp)
9899 {
9900         netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n");
9901
9902         /* Disconnect this device */
9903         netif_device_detach(bp->dev);
9904
9905         /*
9906          * Block ifup for all functions on this engine until "process kill"
9907          * or power cycle.
9908          */
9909         bnx2x_set_reset_in_progress(bp);
9910
9911         /* Shut down the power */
9912         bnx2x_set_power_state(bp, PCI_D3hot);
9913
9914         bp->recovery_state = BNX2X_RECOVERY_FAILED;
9915
9916         smp_mb();
9917 }
9918
9919 /*
9920  * Assumption: runs under rtnl lock. This together with the fact
9921  * that it's called only from bnx2x_sp_rtnl() ensures that it
9922  * will never be called when netif_running(bp->dev) is false.
9923  */
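/* Recovery state machine implemented below:
 *   BNX2X_RECOVERY_INIT - grab the leader lock if possible, mark the reset
 *          as in progress (and global, if needed), unload the NIC and move
 *          to BNX2X_RECOVERY_WAIT.
 *   BNX2X_RECOVERY_WAIT - leader: once all other functions are down, run
 *          bnx2x_leader_reset(); non-leader: wait for the reset to complete
 *          and then reload the NIC.
 */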
9924 static void bnx2x_parity_recover(struct bnx2x *bp)
9925 {
9926         bool global = false;
9927         u32 error_recovered, error_unrecovered;
9928         bool is_parity;
9929
9930         DP(NETIF_MSG_HW, "Handling parity\n");
9931         while (1) {
9932                 switch (bp->recovery_state) {
9933                 case BNX2X_RECOVERY_INIT:
9934                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
9935                         is_parity = bnx2x_chk_parity_attn(bp, &global, false);
9936                         WARN_ON(!is_parity);
9937
9938                         /* Try to get a LEADER_LOCK HW lock */
9939                         if (bnx2x_trylock_leader_lock(bp)) {
9940                                 bnx2x_set_reset_in_progress(bp);
9941                                 /*
9942                                  * Check if there is a global attention and if
9943                                  * there was a global attention, set the global
9944                                  * reset bit.
9945                                  */
9946
9947                                 if (global)
9948                                         bnx2x_set_reset_global(bp);
9949
9950                                 bp->is_leader = 1;
9951                         }
9952
9953                         /* Stop the driver */
9954                         /* If interface has been removed - break */
9955                         if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY, false))
9956                                 return;
9957
9958                         bp->recovery_state = BNX2X_RECOVERY_WAIT;
9959
9960                         /* Ensure "is_leader", MCP command sequence and
9961                          * "recovery_state" update values are seen on other
9962                          * CPUs.
9963                          */
9964                         smp_mb();
9965                         break;
9966
9967                 case BNX2X_RECOVERY_WAIT:
9968                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
9969                         if (bp->is_leader) {
9970                                 int other_engine = BP_PATH(bp) ? 0 : 1;
9971                                 bool other_load_status =
9972                                         bnx2x_get_load_status(bp, other_engine);
9973                                 bool load_status =
9974                                         bnx2x_get_load_status(bp, BP_PATH(bp));
9975                                 global = bnx2x_reset_is_global(bp);
9976
9977                                 /*
9978                                  * In case of a parity in a global block, let
9979                                  * the first leader that performs a
9980                                  * leader_reset() reset the global blocks in
9981                                  * order to clear global attentions. Otherwise
9982                                  * the gates will remain closed for that
9983                                  * engine.
9984                                  */
9985                                 if (load_status ||
9986                                     (global && other_load_status)) {
9987                                         /* Wait until all other functions get
9988                                          * down.
9989                                          */
9990                                         schedule_delayed_work(&bp->sp_rtnl_task,
9991                                                                 HZ/10);
9992                                         return;
9993                                 } else {
9994                                         /* If all other functions got down -
9995                                          * try to bring the chip back to
9996                                          * normal. In any case it's an exit
9997                                          * point for a leader.
9998                                          */
9999                                         if (bnx2x_leader_reset(bp)) {
10000                                                 bnx2x_recovery_failed(bp);
10001                                                 return;
10002                                         }
10003
10004                                         /* If we are here, it means that the
10005                                          * leader has succeeded and doesn't
10006                                          * want to be a leader any more. Try
10007                                          * to continue as a non-leader.
10008                                          */
10009                                         break;
10010                                 }
10011                         } else { /* non-leader */
10012                                 if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) {
10013                                         /* Try to get a LEADER_LOCK HW lock,
10014                                          * since a former leader may have
10015                                          * been unloaded by the user or
10016                                          * released its leadership for another
10017                                          * reason.
10018                                          */
10019                                         if (bnx2x_trylock_leader_lock(bp)) {
10020                                                 /* I'm a leader now! Restart a
10021                                                  * switch case.
10022                                                  */
10023                                                 bp->is_leader = 1;
10024                                                 break;
10025                                         }
10026
10027                                         schedule_delayed_work(&bp->sp_rtnl_task,
10028                                                                 HZ/10);
10029                                         return;
10030
10031                                 } else {
10032                                         /*
10033                                          * If there was a global attention, wait
10034                                          * for it to be cleared.
10035                                          */
10036                                         if (bnx2x_reset_is_global(bp)) {
10037                                                 schedule_delayed_work(
10038                                                         &bp->sp_rtnl_task,
10039                                                         HZ/10);
10040                                                 return;
10041                                         }
10042
10043                                         error_recovered =
10044                                           bp->eth_stats.recoverable_error;
10045                                         error_unrecovered =
10046                                           bp->eth_stats.unrecoverable_error;
10047                                         bp->recovery_state =
10048                                                 BNX2X_RECOVERY_NIC_LOADING;
10049                                         if (bnx2x_nic_load(bp, LOAD_NORMAL)) {
10050                                                 error_unrecovered++;
10051                                                 netdev_err(bp->dev,
10052                                                            "Recovery failed. Power cycle needed\n");
10053                                                 /* Disconnect this device */
10054                                                 netif_device_detach(bp->dev);
10055                                                 /* Shut down the power */
10056                                                 bnx2x_set_power_state(
10057                                                         bp, PCI_D3hot);
10058                                                 smp_mb();
10059                                         } else {
10060                                                 bp->recovery_state =
10061                                                         BNX2X_RECOVERY_DONE;
10062                                                 error_recovered++;
10063                                                 smp_mb();
10064                                         }
10065                                         bp->eth_stats.recoverable_error =
10066                                                 error_recovered;
10067                                         bp->eth_stats.unrecoverable_error =
10068                                                 error_unrecovered;
10069
10070                                         return;
10071                                 }
10072                         }
10073                 default:
10074                         return;
10075                 }
10076         }
10077 }
10078
10079 #if defined(CONFIG_BNX2X_VXLAN) || IS_ENABLED(CONFIG_BNX2X_GENEVE)
10080 static int bnx2x_udp_port_update(struct bnx2x *bp)
10081 {
10082         struct bnx2x_func_switch_update_params *switch_update_params;
10083         struct bnx2x_func_state_params func_params = {NULL};
10084         struct bnx2x_udp_tunnel *udp_tunnel;
10085         u16 vxlan_port = 0, geneve_port = 0;
10086         int rc;
10087
10088         switch_update_params = &func_params.params.switch_update;
10089
10090         /* Prepare parameters for function state transitions */
10091         __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
10092         __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
10093
10094         func_params.f_obj = &bp->func_obj;
10095         func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
10096
10097         /* Function parameters */
10098         __set_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
10099                   &switch_update_params->changes);
10100
10101         if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count) {
10102                 udp_tunnel = &bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE];
10103                 geneve_port = udp_tunnel->dst_port;
10104                 switch_update_params->geneve_dst_port = geneve_port;
10105         }
10106
10107         if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count) {
10108                 udp_tunnel = &bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN];
10109                 vxlan_port = udp_tunnel->dst_port;
10110                 switch_update_params->vxlan_dst_port = vxlan_port;
10111         }
10112
10113         /* Re-enable inner-rss for the offloaded UDP tunnels */
10114         __set_bit(BNX2X_F_UPDATE_TUNNEL_INNER_RSS,
10115                   &switch_update_params->changes);
10116
10117         rc = bnx2x_func_state_change(bp, &func_params);
10118         if (rc)
10119                 BNX2X_ERR("failed to set UDP dst port to %04x %04x (rc = 0x%x)\n",
10120                           vxlan_port, geneve_port, rc);
10121         else
10122                 DP(BNX2X_MSG_SP,
10123                    "Configured UDP ports: Vxlan [%04x] Geneve [%04x]\n",
10124                    vxlan_port, geneve_port);
10125
10126         return rc;
10127 }
10128
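/* Take a reference on the UDP tunnel destination port of the given type.
 * Only a single destination port per tunnel type is tracked; configuring a
 * new one schedules a BNX2X_SP_RTNL_CHANGE_UDP_PORT update.
 */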
10129 static void __bnx2x_add_udp_port(struct bnx2x *bp, u16 port,
10130                                  enum bnx2x_udp_port_type type)
10131 {
10132         struct bnx2x_udp_tunnel *udp_port = &bp->udp_tunnel_ports[type];
10133
10134         if (!netif_running(bp->dev) || !IS_PF(bp))
10135                 return;
10136
10137         if (udp_port->count && udp_port->dst_port == port) {
10138                 udp_port->count++;
10139                 return;
10140         }
10141
10142         if (udp_port->count) {
10143                 DP(BNX2X_MSG_SP,
10144                    "UDP tunnel [%d] -  destination port limit reached\n",
10145                    type);
10146                 return;
10147         }
10148
10149         udp_port->dst_port = port;
10150         udp_port->count = 1;
10151         bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_CHANGE_UDP_PORT, 0);
10152 }
10153
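/* Drop a reference on the UDP tunnel destination port of the given type;
 * when the last reference goes away, schedule a tunnel configuration
 * update if the interface is running.
 */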
10154 static void __bnx2x_del_udp_port(struct bnx2x *bp, u16 port,
10155                                  enum bnx2x_udp_port_type type)
10156 {
10157         struct bnx2x_udp_tunnel *udp_port = &bp->udp_tunnel_ports[type];
10158
10159         if (!IS_PF(bp))
10160                 return;
10161
10162         if (!udp_port->count || udp_port->dst_port != port) {
10163                 DP(BNX2X_MSG_SP, "Invalid UDP tunnel [%d] port\n",
10164                    type);
10165                 return;
10166         }
10167
10168         /* Remove reference, and make certain it's no longer in use */
10169         udp_port->count--;
10170         if (udp_port->count)
10171                 return;
10172         udp_port->dst_port = 0;
10173
10174         if (netif_running(bp->dev))
10175                 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_CHANGE_UDP_PORT, 0);
10176         else
10177                 DP(BNX2X_MSG_SP, "Deleted UDP tunnel [%d] port %d\n",
10178                    type, port);
10179 }
10180 #endif
10181
10182 #ifdef CONFIG_BNX2X_VXLAN
10183 static void bnx2x_add_vxlan_port(struct net_device *netdev,
10184                                  sa_family_t sa_family, __be16 port)
10185 {
10186         struct bnx2x *bp = netdev_priv(netdev);
10187         u16 t_port = ntohs(port);
10188
10189         __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN);
10190 }
10191
10192 static void bnx2x_del_vxlan_port(struct net_device *netdev,
10193                                  sa_family_t sa_family, __be16 port)
10194 {
10195         struct bnx2x *bp = netdev_priv(netdev);
10196         u16 t_port = ntohs(port);
10197
10198         __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN);
10199 }
10200 #endif
10201
10202 #if IS_ENABLED(CONFIG_BNX2X_GENEVE)
10203 static void bnx2x_add_geneve_port(struct net_device *netdev,
10204                                   sa_family_t sa_family, __be16 port)
10205 {
10206         struct bnx2x *bp = netdev_priv(netdev);
10207         u16 t_port = ntohs(port);
10208
10209         __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE);
10210 }
10211
10212 static void bnx2x_del_geneve_port(struct net_device *netdev,
10213                                   sa_family_t sa_family, __be16 port)
10214 {
10215         struct bnx2x *bp = netdev_priv(netdev);
10216         u16 t_port = ntohs(port);
10217
10218         __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE);
10219 }
10220 #endif
10221
10222 static int bnx2x_close(struct net_device *dev);
10223
10224 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
10225  * scheduled on a general workqueue in order to prevent a deadlock.
10226  */
10227 static void bnx2x_sp_rtnl_task(struct work_struct *work)
10228 {
10229         struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);
10230
10231         rtnl_lock();
10232
10233         if (!netif_running(bp->dev)) {
10234                 rtnl_unlock();
10235                 return;
10236         }
10237
10238         if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) {
10239 #ifdef BNX2X_STOP_ON_ERROR
10240                 BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
10241                           "you will need to reboot when done\n");
10242                 goto sp_rtnl_not_reset;
10243 #endif
10244                 /*
10245                  * Clear all pending SP commands as we are going to reset the
10246                  * function anyway.
10247                  */
10248                 bp->sp_rtnl_state = 0;
10249                 smp_mb();
10250
10251                 bnx2x_parity_recover(bp);
10252
10253                 rtnl_unlock();
10254                 return;
10255         }
10256
10257         if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
10258 #ifdef BNX2X_STOP_ON_ERROR
10259                 BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
10260                           "you will need to reboot when done\n");
10261                 goto sp_rtnl_not_reset;
10262 #endif
10263
10264                 /*
10265                  * Clear all pending SP commands as we are going to reset the
10266                  * function anyway.
10267                  */
10268                 bp->sp_rtnl_state = 0;
10269                 smp_mb();
10270
10271                 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
10272                 bnx2x_nic_load(bp, LOAD_NORMAL);
10273
10274                 rtnl_unlock();
10275                 return;
10276         }
10277 #ifdef BNX2X_STOP_ON_ERROR
10278 sp_rtnl_not_reset:
10279 #endif
10280         if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
10281                 bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
10282         if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state))
10283                 bnx2x_after_function_update(bp);
10284         /*
10285          * in case of fan failure we need to reset it even if the "stop on error"
10286          * debug flag is set, since we are trying to prevent permanent overheating
10287          * damage
10288          */
10289         if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) {
10290                 DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n");
10291                 netif_device_detach(bp->dev);
10292                 bnx2x_close(bp->dev);
10293                 rtnl_unlock();
10294                 return;
10295         }
10296
10297         if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_MCAST, &bp->sp_rtnl_state)) {
10298                 DP(BNX2X_MSG_SP,
10299                    "sending set mcast vf pf channel message from rtnl sp-task\n");
10300                 bnx2x_vfpf_set_mcast(bp->dev);
10301         }
10302         if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
10303                                &bp->sp_rtnl_state)){
10304                 if (!test_bit(__LINK_STATE_NOCARRIER, &bp->dev->state)) {
10305                         bnx2x_tx_disable(bp);
10306                         BNX2X_ERR("PF indicated channel is not serviceable anymore. This means this VF device is no longer operational\n");
10307                 }
10308         }
10309
10310         if (test_and_clear_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state)) {
10311                 DP(BNX2X_MSG_SP, "Handling Rx Mode setting\n");
10312                 bnx2x_set_rx_mode_inner(bp);
10313         }
10314
10315         if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
10316                                &bp->sp_rtnl_state))
10317                 bnx2x_pf_set_vfs_vlan(bp);
10318
10319         if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) {
10320                 bnx2x_dcbx_stop_hw_tx(bp);
10321                 bnx2x_dcbx_resume_hw_tx(bp);
10322         }
10323
10324         if (test_and_clear_bit(BNX2X_SP_RTNL_GET_DRV_VERSION,
10325                                &bp->sp_rtnl_state))
10326                 bnx2x_update_mng_version(bp);
10327
10328 #if defined(CONFIG_BNX2X_VXLAN) || IS_ENABLED(CONFIG_BNX2X_GENEVE)
10329         if (test_and_clear_bit(BNX2X_SP_RTNL_CHANGE_UDP_PORT,
10330                                &bp->sp_rtnl_state)) {
10331                 if (bnx2x_udp_port_update(bp)) {
10332                         /* On error, forget configuration */
10333                         memset(bp->udp_tunnel_ports, 0,
10334                                sizeof(struct bnx2x_udp_tunnel) *
10335                                BNX2X_UDP_PORT_MAX);
10336                 } else {
10337                         /* Since we don't store additional port information,
10338                          * if no port is configured for any feature, ask for
10339                          * information about currently configured ports.
10340                          */
10341 #ifdef CONFIG_BNX2X_VXLAN
10342                         if (!bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count)
10343                                 vxlan_get_rx_port(bp->dev);
10344 #endif
10345 #if IS_ENABLED(CONFIG_BNX2X_GENEVE)
10346                         if (!bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count)
10347                                 geneve_get_rx_port(bp->dev);
10348 #endif
10349                 }
10350         }
10351 #endif
10352
10353         /* work which needs the rtnl lock not taken (as it takes the lock itself
10354          * and can be called from other contexts as well)
10355          */
10356         rtnl_unlock();
10357
10358         /* enable SR-IOV if applicable */
10359         if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV,
10360                                                &bp->sp_rtnl_state)) {
10361                 bnx2x_disable_sriov(bp);
10362                 bnx2x_enable_sriov(bp);
10363         }
10364 }
10365
10366 static void bnx2x_period_task(struct work_struct *work)
10367 {
10368         struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work);
10369
10370         if (!netif_running(bp->dev))
10371                 goto period_task_exit;
10372
10373         if (CHIP_REV_IS_SLOW(bp)) {
10374                 BNX2X_ERR("period task called on emulation, ignoring\n");
10375                 goto period_task_exit;
10376         }
10377
10378         bnx2x_acquire_phy_lock(bp);
10379         /*
10380          * The barrier is needed to ensure the ordering between the writing to
10381          * the bp->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and
10382          * the reading here.
10383          */
10384         smp_mb();
10385         if (bp->port.pmf) {
10386                 bnx2x_period_func(&bp->link_params, &bp->link_vars);
10387
10388                 /* Re-queue task in 1 sec */
10389                 queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ);
10390         }
10391
10392         bnx2x_release_phy_lock(bp);
10393 period_task_exit:
10394         return;
10395 }
10396
10397 /*
10398  * Init service functions
10399  */
10400
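/* Return the address of this (absolute) function's PGL "pretend" register,
 * derived from the F0 base address and the F0->F1 stride.
 */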
10401 static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
10402 {
10403         u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
10404         u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
10405         return base + (BP_ABS_FUNC(bp)) * stride;
10406 }
10407
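/* Stop UMAC Rx on @port if the UMAC is out of reset; the original
 * UMAC_REG_COMMAND_CONFIG value is saved in @vals so that it can be
 * restored later. Returns true if the MAC was actually stopped.
 */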
10408 static bool bnx2x_prev_unload_close_umac(struct bnx2x *bp,
10409                                          u8 port, u32 reset_reg,
10410                                          struct bnx2x_mac_vals *vals)
10411 {
10412         u32 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
10413         u32 base_addr;
10414
10415         if (!(mask & reset_reg))
10416                 return false;
10417
10418         BNX2X_DEV_INFO("Disable umac Rx %02x\n", port);
10419         base_addr = port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
10420         vals->umac_addr[port] = base_addr + UMAC_REG_COMMAND_CONFIG;
10421         vals->umac_val[port] = REG_RD(bp, vals->umac_addr[port]);
10422         REG_WR(bp, vals->umac_addr[port], 0);
10423
10424         return true;
10425 }
10426
10427 static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
10428                                         struct bnx2x_mac_vals *vals)
10429 {
10430         u32 val, base_addr, offset, mask, reset_reg;
10431         bool mac_stopped = false;
10432         u8 port = BP_PORT(bp);
10433
10434         /* reset addresses as they also mark which values were changed */
10435         memset(vals, 0, sizeof(*vals));
10436
10437         reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);
10438
10439         if (!CHIP_IS_E3(bp)) {
10440                 val = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
10441                 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
10442                 if ((mask & reset_reg) && val) {
10443                         u32 wb_data[2];
10444                         BNX2X_DEV_INFO("Disable bmac Rx\n");
10445                         base_addr = BP_PORT(bp) ? NIG_REG_INGRESS_BMAC1_MEM
10446                                                 : NIG_REG_INGRESS_BMAC0_MEM;
10447                         offset = CHIP_IS_E2(bp) ? BIGMAC2_REGISTER_BMAC_CONTROL
10448                                                 : BIGMAC_REGISTER_BMAC_CONTROL;
10449
10450                         /*
10451                          * use rd/wr since we cannot use dmae. This is safe
10452                          * since MCP won't access the bus due to the request
10453                          * to unload, and no function on the path can be
10454                          * loaded at this time.
10455                          */
10456                         wb_data[0] = REG_RD(bp, base_addr + offset);
10457                         wb_data[1] = REG_RD(bp, base_addr + offset + 0x4);
10458                         vals->bmac_addr = base_addr + offset;
10459                         vals->bmac_val[0] = wb_data[0];
10460                         vals->bmac_val[1] = wb_data[1];
10461                         wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
10462                         REG_WR(bp, vals->bmac_addr, wb_data[0]);
10463                         REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]);
10464                 }
10465                 BNX2X_DEV_INFO("Disable emac Rx\n");
10466                 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4;
10467                 vals->emac_val = REG_RD(bp, vals->emac_addr);
10468                 REG_WR(bp, vals->emac_addr, 0);
10469                 mac_stopped = true;
10470         } else {
10471                 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
10472                         BNX2X_DEV_INFO("Disable xmac Rx\n");
10473                         base_addr = BP_PORT(bp) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
10474                         val = REG_RD(bp, base_addr + XMAC_REG_PFC_CTRL_HI);
10475                         REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
10476                                val & ~(1 << 1));
10477                         REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
10478                                val | (1 << 1));
10479                         vals->xmac_addr = base_addr + XMAC_REG_CTRL;
10480                         vals->xmac_val = REG_RD(bp, vals->xmac_addr);
10481                         REG_WR(bp, vals->xmac_addr, 0);
10482                         mac_stopped = true;
10483                 }
10484
10485                 mac_stopped |= bnx2x_prev_unload_close_umac(bp, 0,
10486                                                             reset_reg, vals);
10487                 mac_stopped |= bnx2x_prev_unload_close_umac(bp, 1,
10488                                                             reset_reg, vals);
10489         }
10490
10491         if (mac_stopped)
10492                 msleep(20);
10493 }
10494
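/* Accessors for the Rx producers (RCQ/BD) that a previously loaded UNDI
 * (pre-boot) driver leaves in TSTORM internal memory, and a helper to pack
 * them back into a single 32-bit value.
 */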
10495 #define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
10496 #define BNX2X_PREV_UNDI_PROD_ADDR_H(f) (BAR_TSTRORM_INTMEM + \
10497                                         0x1848 + ((f) << 4))
10498 #define BNX2X_PREV_UNDI_RCQ(val)        ((val) & 0xffff)
10499 #define BNX2X_PREV_UNDI_BD(val)         ((val) >> 16 & 0xffff)
10500 #define BNX2X_PREV_UNDI_PROD(rcq, bd)   ((bd) << 16 | (rcq))
10501
10502 #define BCM_5710_UNDI_FW_MF_MAJOR       (0x07)
10503 #define BCM_5710_UNDI_FW_MF_MINOR       (0x08)
10504 #define BCM_5710_UNDI_FW_MF_VERS        (0x05)
10505
10506 static bool bnx2x_prev_is_after_undi(struct bnx2x *bp)
10507 {
10508         /* UNDI marks its presence in DORQ -
10509          * it initializes CID offset for normal bell to 0x7
10510          */
10511         if (!(REG_RD(bp, MISC_REG_RESET_REG_1) &
10512             MISC_REGISTERS_RESET_REG_1_RST_DORQ))
10513                 return false;
10514
10515         if (REG_RD(bp, DORQ_REG_NORM_CID_OFST) == 0x7) {
10516                 BNX2X_DEV_INFO("UNDI previously loaded\n");
10517                 return true;
10518         }
10519
10520         return false;
10521 }
10522
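/* Advance by @inc the RCQ and BD producers that the UNDI driver left
 * behind for this port/function.
 */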
10523 static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 inc)
10524 {
10525         u16 rcq, bd;
10526         u32 addr, tmp_reg;
10527
10528         if (BP_FUNC(bp) < 2)
10529                 addr = BNX2X_PREV_UNDI_PROD_ADDR(BP_PORT(bp));
10530         else
10531                 addr = BNX2X_PREV_UNDI_PROD_ADDR_H(BP_FUNC(bp) - 2);
10532
10533         tmp_reg = REG_RD(bp, addr);
10534         rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc;
10535         bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc;
10536
10537         tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd);
10538         REG_WR(bp, addr, tmp_reg);
10539
10540         BNX2X_DEV_INFO("UNDI producer [%d/%d][%08x] rings bd -> 0x%04x, rcq -> 0x%04x\n",
10541                        BP_PORT(bp), BP_FUNC(bp), addr, bd, rcq);
10542 }
10543
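/* Report UNLOAD_DONE to the MCP on behalf of the previous-unload flow,
 * asking it to skip the link reset.
 */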
10544 static int bnx2x_prev_mcp_done(struct bnx2x *bp)
10545 {
10546         u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE,
10547                                   DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
10548         if (!rc) {
10549                 BNX2X_ERR("MCP response failure, aborting\n");
10550                 return -EBUSY;
10551         }
10552
10553         return 0;
10554 }
10555
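/* Find the bnx2x_prev_list entry matching this device's bus, slot and
 * path; callers are expected to hold bnx2x_prev_sem.
 */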
10556 static struct bnx2x_prev_path_list *
10557                 bnx2x_prev_path_get_entry(struct bnx2x *bp)
10558 {
10559         struct bnx2x_prev_path_list *tmp_list;
10560
10561         list_for_each_entry(tmp_list, &bnx2x_prev_list, list)
10562                 if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot &&
10563                     bp->pdev->bus->number == tmp_list->bus &&
10564                     BP_PATH(bp) == tmp_list->path)
10565                         return tmp_list;
10566
10567         return NULL;
10568 }
10569
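/* Mark this device's previous-path list entry as having been hit by an
 * AER/EEH error.
 */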
10570 static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp)
10571 {
10572         struct bnx2x_prev_path_list *tmp_list;
10573         int rc;
10574
10575         rc = down_interruptible(&bnx2x_prev_sem);
10576         if (rc) {
10577                 BNX2X_ERR("Received %d when tried to take lock\n", rc);
10578                 return rc;
10579         }
10580
10581         tmp_list = bnx2x_prev_path_get_entry(bp);
10582         if (tmp_list) {
10583                 tmp_list->aer = 1;
10584                 rc = 0;
10585         } else {
10586                 BNX2X_ERR("path %d: Entry does not exist for eeh; flow occurs before initial insmod is over?\n",
10587                           BP_PATH(bp));
10588         }
10589
10590         up(&bnx2x_prev_sem);
10591
10592         return rc;
10593 }
10594
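/* Return true if the previous unload was already completed for this path
 * and the path was not marked by AER.
 */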
10595 static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
10596 {
10597         struct bnx2x_prev_path_list *tmp_list;
10598         bool rc = false;
10599
10600         if (down_trylock(&bnx2x_prev_sem))
10601                 return false;
10602
10603         tmp_list = bnx2x_prev_path_get_entry(bp);
10604         if (tmp_list) {
10605                 if (tmp_list->aer) {
10606                         DP(NETIF_MSG_HW, "Path %d was marked by AER\n",
10607                            BP_PATH(bp));
10608                 } else {
10609                         rc = true;
10610                         BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n",
10611                                        BP_PATH(bp));
10612                 }
10613         }
10614
10615         up(&bnx2x_prev_sem);
10616
10617         return rc;
10618 }
10619
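/* Return true if a pre-boot UNDI driver was detected on this port during
 * the previous-unload flow.
 */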
10620 bool bnx2x_port_after_undi(struct bnx2x *bp)
10621 {
10622         struct bnx2x_prev_path_list *entry;
10623         bool val;
10624
10625         down(&bnx2x_prev_sem);
10626
10627         entry = bnx2x_prev_path_get_entry(bp);
10628         val = !!(entry && (entry->undi & (1 << BP_PORT(bp))));
10629
10630         up(&bnx2x_prev_sem);
10631
10632         return val;
10633 }
10634
10635 static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
10636 {
10637         struct bnx2x_prev_path_list *tmp_list;
10638         int rc;
10639
10640         rc = down_interruptible(&bnx2x_prev_sem);
10641         if (rc) {
10642                 BNX2X_ERR("Received %d when tried to take lock\n", rc);
10643                 return rc;
10644         }
10645
10646         /* Check whether the entry for this path already exists */
10647         tmp_list = bnx2x_prev_path_get_entry(bp);
10648         if (tmp_list) {
10649                 if (!tmp_list->aer) {
10650                         BNX2X_ERR("Re-Marking the path.\n");
10651                 } else {
10652                         DP(NETIF_MSG_HW, "Removing AER indication from path %d\n",
10653                            BP_PATH(bp));
10654                         tmp_list->aer = 0;
10655                 }
10656                 up(&bnx2x_prev_sem);
10657                 return 0;
10658         }
10659         up(&bnx2x_prev_sem);
10660
10661         /* Create an entry for this path and add it */
10662         tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL);
10663         if (!tmp_list) {
10664                 BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n");
10665                 return -ENOMEM;
10666         }
10667
10668         tmp_list->bus = bp->pdev->bus->number;
10669         tmp_list->slot = PCI_SLOT(bp->pdev->devfn);
10670         tmp_list->path = BP_PATH(bp);
10671         tmp_list->aer = 0;
10672         tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0;
10673
10674         rc = down_interruptible(&bnx2x_prev_sem);
10675         if (rc) {
10676                 BNX2X_ERR("Received %d when tried to take lock\n", rc);
10677                 kfree(tmp_list);
10678         } else {
10679                 DP(NETIF_MSG_HW, "Marked path [%d] - finished previous unload\n",
10680                    BP_PATH(bp));
10681                 list_add(&tmp_list->list, &bnx2x_prev_list);
10682                 up(&bnx2x_prev_sem);
10683         }
10684
10685         return rc;
10686 }
10687
10688 static int bnx2x_do_flr(struct bnx2x *bp)
10689 {
10690         struct pci_dev *dev = bp->pdev;
10691
10692         if (CHIP_IS_E1x(bp)) {
10693                 BNX2X_DEV_INFO("FLR not supported in E1/E1H\n");
10694                 return -EINVAL;
10695         }
10696
10697         /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */
10698         if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
10699                 BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n",
10700                           bp->common.bc_ver);
10701                 return -EINVAL;
10702         }
10703
10704         if (!pci_wait_for_pending_transaction(dev))
10705                 dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
10706
10707         BNX2X_DEV_INFO("Initiating FLR\n");
10708         bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0);
10709
10710         return 0;
10711 }
10712
10713 static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
10714 {
10715         int rc;
10716
10717         BNX2X_DEV_INFO("Uncommon unload Flow\n");
10718
10719         /* Test if previous unload process was already finished for this path */
10720         if (bnx2x_prev_is_path_marked(bp))
10721                 return bnx2x_prev_mcp_done(bp);
10722
10723         BNX2X_DEV_INFO("Path is unmarked\n");
10724
10725         /* Cannot proceed with FLR if UNDI is loaded, since FW does not match */
10726         if (bnx2x_prev_is_after_undi(bp))
10727                 goto out;
10728
10729         /* If function has FLR capabilities, and existing FW version matches
10730          * the one required, then FLR will be sufficient to clean any residue
10731          * left by previous driver
10732          */
10733         rc = bnx2x_compare_fw_ver(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION, false);
10734
10735         if (!rc) {
10736                 /* fw version is good */
10737                 BNX2X_DEV_INFO("FW version matches our own. Attempting FLR\n");
10738                 rc = bnx2x_do_flr(bp);
10739         }
10740
10741         if (!rc) {
10742                 /* FLR was performed */
10743                 BNX2X_DEV_INFO("FLR successful\n");
10744                 return 0;
10745         }
10746
10747         BNX2X_DEV_INFO("Could not FLR\n");
10748
10749 out:
10750         /* Close the MCP request, return failure */
10751         rc = bnx2x_prev_mcp_done(bp);
10752         if (!rc)
10753                 rc = BNX2X_PREV_WAIT_NEEDED;
10754
10755         return rc;
10756 }
10757
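      /* Path-wide ('common') previous-unload flow: if the BRB was not yet
       * reset, close the MAC and LLH Rx paths and wait for the BRB to drain,
       * then reset the common blocks, restore the saved MAC registers and
       * mark the path as cleaned.
       */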
10758 static int bnx2x_prev_unload_common(struct bnx2x *bp)
10759 {
10760         u32 reset_reg, tmp_reg = 0, rc;
10761         bool prev_undi = false;
10762         struct bnx2x_mac_vals mac_vals;
10763
10764         /* It is possible that a previous function received a 'common' answer
10765          * but has not loaded yet, creating a scenario in which multiple
10766          * functions receive 'common' on the same path.
10767          */
10768         BNX2X_DEV_INFO("Common unload Flow\n");
10769
10770         memset(&mac_vals, 0, sizeof(mac_vals));
10771
10772         if (bnx2x_prev_is_path_marked(bp))
10773                 return bnx2x_prev_mcp_done(bp);
10774
10775         reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);
10776
10777         /* Reset should be performed after BRB is emptied */
10778         if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
10779                 u32 timer_count = 1000;
10780
10781                 /* Close the MAC Rx to prevent BRB from filling up */
10782                 bnx2x_prev_unload_close_mac(bp, &mac_vals);
10783
10784                 /* close LLH filters for both ports towards the BRB */
10785                 bnx2x_set_rx_filter(&bp->link_params, 0);
10786                 bp->link_params.port ^= 1;
10787                 bnx2x_set_rx_filter(&bp->link_params, 0);
10788                 bp->link_params.port ^= 1;
10789
10790                 /* Check if the UNDI driver was previously loaded */
10791                 if (bnx2x_prev_is_after_undi(bp)) {
10792                         prev_undi = true;
10793                         /* clear the UNDI indication */
10794                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
10795                         /* clear possible idle check errors */
10796                         REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
10797                 }
10798                 if (!CHIP_IS_E1x(bp))
10799                         /* block FW from writing to host */
10800                         REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
10801
10802                 /* wait until BRB is empty */
10803                 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
10804                 while (timer_count) {
10805                         u32 prev_brb = tmp_reg;
10806
10807                         tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
10808                         if (!tmp_reg)
10809                                 break;
10810
10811                         BNX2X_DEV_INFO("BRB still has 0x%08x\n", tmp_reg);
10812
10813                         /* reset timer as long as BRB actually gets emptied */
10814                         if (prev_brb > tmp_reg)
10815                                 timer_count = 1000;
10816                         else
10817                                 timer_count--;
10818
10819                         /* If UNDI resides in memory, manually increment it */
10820                         if (prev_undi)
10821                                 bnx2x_prev_unload_undi_inc(bp, 1);
10822
10823                         udelay(10);
10824                 }
10825
10826                 if (!timer_count)
10827                         BNX2X_ERR("Failed to empty BRB, hope for the best\n");
10828         }
10829
10830         /* No packets are in the pipeline, path is ready for reset */
10831         bnx2x_reset_common(bp);
10832
10833         if (mac_vals.xmac_addr)
10834                 REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val);
10835         if (mac_vals.umac_addr[0])
10836                 REG_WR(bp, mac_vals.umac_addr[0], mac_vals.umac_val[0]);
10837         if (mac_vals.umac_addr[1])
10838                 REG_WR(bp, mac_vals.umac_addr[1], mac_vals.umac_val[1]);
10839         if (mac_vals.emac_addr)
10840                 REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val);
10841         if (mac_vals.bmac_addr) {
10842                 REG_WR(bp, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
10843                 REG_WR(bp, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
10844         }
10845
10846         rc = bnx2x_prev_mark_path(bp, prev_undi);
10847         if (rc) {
10848                 bnx2x_prev_mcp_done(bp);
10849                 return rc;
10850         }
10851
10852         return bnx2x_prev_mcp_done(bp);
10853 }
10854
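      /* Entry point of the previous-unload flow, run during probe: release
       * stale HW/NVRAM locks left by a previous driver instance, then
       * negotiate an unload request with the MCP and run the common or
       * uncommon cleanup flow as instructed, retrying up to 10 times before
       * giving up with -EPROBE_DEFER.
       */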
10855 static int bnx2x_prev_unload(struct bnx2x *bp)
10856 {
10857         int time_counter = 10;
10858         u32 rc, fw, hw_lock_reg, hw_lock_val;
10859         BNX2X_DEV_INFO("Entering Previous Unload Flow\n");
10860
10861         /* Clear the HW of errors which may have resulted from an interrupted
10862          * DMAE transaction.
10863          */
10864         bnx2x_clean_pglue_errors(bp);
10865
10866         /* Release previously held locks */
10867         hw_lock_reg = (BP_FUNC(bp) <= 5) ?
10868                       (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) :
10869                       (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8);
10870
10871         hw_lock_val = REG_RD(bp, hw_lock_reg);
10872         if (hw_lock_val) {
10873                 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
10874                         BNX2X_DEV_INFO("Release Previously held NVRAM lock\n");
10875                         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10876                                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << BP_PORT(bp)));
10877                 }
10878
10879                 BNX2X_DEV_INFO("Release Previously held hw lock\n");
10880                 REG_WR(bp, hw_lock_reg, 0xffffffff);
10881         } else
10882                 BNX2X_DEV_INFO("No need to release hw/nvram locks\n");
10883
10884         if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) {
10885                 BNX2X_DEV_INFO("Release previously held alr\n");
10886                 bnx2x_release_alr(bp);
10887         }
10888
10889         do {
10890                 int aer = 0;
10891                 /* Lock MCP using an unload request */
10892                 fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
10893                 if (!fw) {
10894                         BNX2X_ERR("MCP response failure, aborting\n");
10895                         rc = -EBUSY;
10896                         break;
10897                 }
10898
10899                 rc = down_interruptible(&bnx2x_prev_sem);
10900                 if (rc) {
10901                         BNX2X_ERR("Cannot check for AER; received %d when trying to take the lock\n",
10902                                   rc);
10903                 } else {
10904                         /* If Path is marked by EEH, ignore unload status */
10905                         aer = !!(bnx2x_prev_path_get_entry(bp) &&
10906                                  bnx2x_prev_path_get_entry(bp)->aer);
10907                         up(&bnx2x_prev_sem);
10908                 }
10909
10910                 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON || aer) {
10911                         rc = bnx2x_prev_unload_common(bp);
10912                         break;
10913                 }
10914
10915                 /* non-common reply from MCP might require looping */
10916                 rc = bnx2x_prev_unload_uncommon(bp);
10917                 if (rc != BNX2X_PREV_WAIT_NEEDED)
10918                         break;
10919
10920                 msleep(20);
10921         } while (--time_counter);
10922
10923         if (!time_counter || rc) {
10924                 BNX2X_DEV_INFO("Unloading previous driver did not occur, possibly due to MF UNDI\n");
10925                 rc = -EPROBE_DEFER;
10926         }
10927
10928         /* Mark function if its port was used to boot from SAN */
10929         if (bnx2x_port_after_undi(bp))
10930                 bp->link_params.feature_config_flags |=
10931                         FEATURE_CONFIG_BOOT_FROM_SAN;
10932
10933         BNX2X_DEV_INFO("Finished Previous Unload Flow [%d]\n", rc);
10934
10935         return rc;
10936 }
10937
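      /* Gather chip-common information: chip id/revision, port mode, flash
       * size, shmem/shmem2 bases and bootcode version, plus the feature flags
       * derived from them.  Sets NO_MCP_FLAG when no management firmware is
       * detected.
       */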
10938 static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
10939 {
10940         u32 val, val2, val3, val4, id, boot_mode;
10941         u16 pmc;
10942
10943         /* Get the chip revision id and number. */
10944         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
10945         val = REG_RD(bp, MISC_REG_CHIP_NUM);
10946         id = ((val & 0xffff) << 16);
10947         val = REG_RD(bp, MISC_REG_CHIP_REV);
10948         id |= ((val & 0xf) << 12);
10949
10950         /* Metal is read from PCI regs, but we can't access >=0x400 from
10951          * the configuration space (so we need to reg_rd)
10952          */
10953         val = REG_RD(bp, PCICFG_OFFSET + PCI_ID_VAL3);
10954         id |= (((val >> 24) & 0xf) << 4);
10955         val = REG_RD(bp, MISC_REG_BOND_ID);
10956         id |= (val & 0xf);
10957         bp->common.chip_id = id;
10958
10959         /* force 57811 according to MISC register */
10960         if (REG_RD(bp, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
10961                 if (CHIP_IS_57810(bp))
10962                         bp->common.chip_id = (CHIP_NUM_57811 << 16) |
10963                                 (bp->common.chip_id & 0x0000FFFF);
10964                 else if (CHIP_IS_57810_MF(bp))
10965                         bp->common.chip_id = (CHIP_NUM_57811_MF << 16) |
10966                                 (bp->common.chip_id & 0x0000FFFF);
10967                 bp->common.chip_id |= 0x1;
10968         }
10969
10970         /* Set doorbell size */
10971         bp->db_size = (1 << BNX2X_DB_SHIFT);
10972
10973         if (!CHIP_IS_E1x(bp)) {
10974                 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
10975                 if ((val & 1) == 0)
10976                         val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
10977                 else
10978                         val = (val >> 1) & 1;
10979                 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
10980                                                        "2_PORT_MODE");
10981                 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
10982                                                  CHIP_2_PORT_MODE;
10983
10984                 if (CHIP_MODE_IS_4_PORT(bp))
10985                         bp->pfid = (bp->pf_num >> 1);   /* 0..3 */
10986                 else
10987                         bp->pfid = (bp->pf_num & 0x6);  /* 0, 2, 4, 6 */
10988         } else {
10989                 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
10990                 bp->pfid = bp->pf_num;                  /* 0..7 */
10991         }
10992
10993         BNX2X_DEV_INFO("pf_id: %x\n", bp->pfid);
10994
10995         bp->link_params.chip_id = bp->common.chip_id;
10996         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
10997
10998         val = (REG_RD(bp, 0x2874) & 0x55);
10999         if ((bp->common.chip_id & 0x1) ||
11000             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
11001                 bp->flags |= ONE_PORT_FLAG;
11002                 BNX2X_DEV_INFO("single port device\n");
11003         }
11004
11005         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
11006         bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE <<
11007                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
11008         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
11009                        bp->common.flash_size, bp->common.flash_size);
11010
11011         bnx2x_init_shmem(bp);
11012
11013         bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
11014                                         MISC_REG_GENERIC_CR_1 :
11015                                         MISC_REG_GENERIC_CR_0));
11016
11017         bp->link_params.shmem_base = bp->common.shmem_base;
11018         bp->link_params.shmem2_base = bp->common.shmem2_base;
11019         if (SHMEM2_RD(bp, size) >
11020             (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
11021                 bp->link_params.lfa_base =
11022                 REG_RD(bp, bp->common.shmem2_base +
11023                        (u32)offsetof(struct shmem2_region,
11024                                      lfa_host_addr[BP_PORT(bp)]));
11025         else
11026                 bp->link_params.lfa_base = 0;
11027         BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
11028                        bp->common.shmem_base, bp->common.shmem2_base);
11029
11030         if (!bp->common.shmem_base) {
11031                 BNX2X_DEV_INFO("MCP not active\n");
11032                 bp->flags |= NO_MCP_FLAG;
11033                 return;
11034         }
11035
11036         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
11037         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
11038
11039         bp->link_params.hw_led_mode = ((bp->common.hw_config &
11040                                         SHARED_HW_CFG_LED_MODE_MASK) >>
11041                                        SHARED_HW_CFG_LED_MODE_SHIFT);
11042
11043         bp->link_params.feature_config_flags = 0;
11044         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
11045         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
11046                 bp->link_params.feature_config_flags |=
11047                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
11048         else
11049                 bp->link_params.feature_config_flags &=
11050                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
11051
11052         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
11053         bp->common.bc_ver = val;
11054         BNX2X_DEV_INFO("bc_ver %X\n", val);
11055         if (val < BNX2X_BC_VER) {
11056                 /* For now only warn;
11057                  * later we might need to enforce this. */
11058                 BNX2X_ERR("This driver needs bc_ver %X but found %X, please upgrade BC\n",
11059                           BNX2X_BC_VER, val);
11060         }
11061         bp->link_params.feature_config_flags |=
11062                                 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
11063                                 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
11064
11065         bp->link_params.feature_config_flags |=
11066                 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
11067                 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
11068         bp->link_params.feature_config_flags |=
11069                 (val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ?
11070                 FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0;
11071         bp->link_params.feature_config_flags |=
11072                 (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
11073                 FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
11074
11075         bp->link_params.feature_config_flags |=
11076                 (val >= REQ_BC_VER_4_MT_SUPPORTED) ?
11077                 FEATURE_CONFIG_MT_SUPPORT : 0;
11078
11079         bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
11080                         BC_SUPPORTS_PFC_STATS : 0;
11081
11082         bp->flags |= (val >= REQ_BC_VER_4_FCOE_FEATURES) ?
11083                         BC_SUPPORTS_FCOE_FEATURES : 0;
11084
11085         bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ?
11086                         BC_SUPPORTS_DCBX_MSG_NON_PMF : 0;
11087
11088         bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ?
11089                         BC_SUPPORTS_RMMOD_CMD : 0;
11090
11091         boot_mode = SHMEM_RD(bp,
11092                         dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
11093                         PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
11094         switch (boot_mode) {
11095         case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE:
11096                 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE;
11097                 break;
11098         case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB:
11099                 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI;
11100                 break;
11101         case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT:
11102                 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE;
11103                 break;
11104         case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE:
11105                 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE;
11106                 break;
11107         }
11108
11109         pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_PMC, &pmc);
11110         bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
11111
11112         BNX2X_DEV_INFO("%sWoL capable\n",
11113                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
11114
11115         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
11116         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
11117         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
11118         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
11119
11120         dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
11121                  val, val2, val3, val4);
11122 }
11123
11124 #define IGU_FID(val)    GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
11125 #define IGU_VEC(val)    GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
11126
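      /* Derive this function's IGU status-block layout: in backward
       * compatible mode the mapping is fixed by pf/vn; otherwise walk the IGU
       * mapping CAM to find the default SB, the first non-default SB and the
       * number of SBs owned by this function.
       */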
11127 static int bnx2x_get_igu_cam_info(struct bnx2x *bp)
11128 {
11129         int pfid = BP_FUNC(bp);
11130         int igu_sb_id;
11131         u32 val;
11132         u8 fid, igu_sb_cnt = 0;
11133
11134         bp->igu_base_sb = 0xff;
11135         if (CHIP_INT_MODE_IS_BC(bp)) {
11136                 int vn = BP_VN(bp);
11137                 igu_sb_cnt = bp->igu_sb_cnt;
11138                 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
11139                         FP_SB_MAX_E1x;
11140
11141                 bp->igu_dsb_id =  E1HVN_MAX * FP_SB_MAX_E1x +
11142                         (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
11143
11144                 return 0;
11145         }
11146
11147         /* IGU in normal mode - read CAM */
11148         for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
11149              igu_sb_id++) {
11150                 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
11151                 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
11152                         continue;
11153                 fid = IGU_FID(val);
11154                 if ((fid & IGU_FID_ENCODE_IS_PF)) {
11155                         if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
11156                                 continue;
11157                         if (IGU_VEC(val) == 0)
11158                                 /* default status block */
11159                                 bp->igu_dsb_id = igu_sb_id;
11160                         else {
11161                                 if (bp->igu_base_sb == 0xff)
11162                                         bp->igu_base_sb = igu_sb_id;
11163                                 igu_sb_cnt++;
11164                         }
11165                 }
11166         }
11167
11168 #ifdef CONFIG_PCI_MSI
11169         /* Due to new PF resource allocation by MFW T7.4 and above, it is
11170          * possible that the number of CAM entries will not equal the value
11171          * advertised in PCI.
11172          * The driver should use the minimum of the two as the actual status
11173          * block count.
11174          */
11175         bp->igu_sb_cnt = min_t(int, bp->igu_sb_cnt, igu_sb_cnt);
11176 #endif
11177
11178         if (igu_sb_cnt == 0) {
11179                 BNX2X_ERR("CAM configuration error\n");
11180                 return -EINVAL;
11181         }
11182
11183         return 0;
11184 }
11185
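      /* Build bp->port.supported[] for each link configuration from the
       * probed PHYs, read the PHY address, and mask out speeds that the
       * NVRAM speed_cap_mask does not allow.
       */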
11186 static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
11187 {
11188         int cfg_size = 0, idx, port = BP_PORT(bp);
11189
11190         /* Aggregation of supported attributes of all external phys */
11191         bp->port.supported[0] = 0;
11192         bp->port.supported[1] = 0;
11193         switch (bp->link_params.num_phys) {
11194         case 1:
11195                 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
11196                 cfg_size = 1;
11197                 break;
11198         case 2:
11199                 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
11200                 cfg_size = 1;
11201                 break;
11202         case 3:
11203                 if (bp->link_params.multi_phy_config &
11204                     PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
11205                         bp->port.supported[1] =
11206                                 bp->link_params.phy[EXT_PHY1].supported;
11207                         bp->port.supported[0] =
11208                                 bp->link_params.phy[EXT_PHY2].supported;
11209                 } else {
11210                         bp->port.supported[0] =
11211                                 bp->link_params.phy[EXT_PHY1].supported;
11212                         bp->port.supported[1] =
11213                                 bp->link_params.phy[EXT_PHY2].supported;
11214                 }
11215                 cfg_size = 2;
11216                 break;
11217         }
11218
11219         if (!(bp->port.supported[0] || bp->port.supported[1])) {
11220                 BNX2X_ERR("NVRAM config error. BAD phy config. PHY1 config 0x%x, PHY2 config 0x%x\n",
11221                            SHMEM_RD(bp,
11222                            dev_info.port_hw_config[port].external_phy_config),
11223                            SHMEM_RD(bp,
11224                            dev_info.port_hw_config[port].external_phy_config2));
11225                 return;
11226         }
11227
11228         if (CHIP_IS_E3(bp))
11229                 bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR);
11230         else {
11231                 switch (switch_cfg) {
11232                 case SWITCH_CFG_1G:
11233                         bp->port.phy_addr = REG_RD(
11234                                 bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
11235                         break;
11236                 case SWITCH_CFG_10G:
11237                         bp->port.phy_addr = REG_RD(
11238                                 bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
11239                         break;
11240                 default:
11241                         BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
11242                                   bp->port.link_config[0]);
11243                         return;
11244                 }
11245         }
11246         BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
11247         /* mask what we support according to speed_cap_mask per configuration */
11248         for (idx = 0; idx < cfg_size; idx++) {
11249                 if (!(bp->link_params.speed_cap_mask[idx] &
11250                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
11251                         bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
11252
11253                 if (!(bp->link_params.speed_cap_mask[idx] &
11254                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
11255                         bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
11256
11257                 if (!(bp->link_params.speed_cap_mask[idx] &
11258                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
11259                         bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
11260
11261                 if (!(bp->link_params.speed_cap_mask[idx] &
11262                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
11263                         bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
11264
11265                 if (!(bp->link_params.speed_cap_mask[idx] &
11266                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
11267                         bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
11268                                                      SUPPORTED_1000baseT_Full);
11269
11270                 if (!(bp->link_params.speed_cap_mask[idx] &
11271                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
11272                         bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
11273
11274                 if (!(bp->link_params.speed_cap_mask[idx] &
11275                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
11276                         bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
11277
11278                 if (!(bp->link_params.speed_cap_mask[idx] &
11279                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_20G))
11280                         bp->port.supported[idx] &= ~SUPPORTED_20000baseKR2_Full;
11281         }
11282
11283         BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
11284                        bp->port.supported[1]);
11285 }
11286
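      /* Translate each NVRAM link_config entry into the requested line speed,
       * duplex, flow control and advertised mask; invalid combinations are
       * reported as NVRAM config errors.
       */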
11287 static void bnx2x_link_settings_requested(struct bnx2x *bp)
11288 {
11289         u32 link_config, idx, cfg_size = 0;
11290         bp->port.advertising[0] = 0;
11291         bp->port.advertising[1] = 0;
11292         switch (bp->link_params.num_phys) {
11293         case 1:
11294         case 2:
11295                 cfg_size = 1;
11296                 break;
11297         case 3:
11298                 cfg_size = 2;
11299                 break;
11300         }
11301         for (idx = 0; idx < cfg_size; idx++) {
11302                 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
11303                 link_config = bp->port.link_config[idx];
11304                 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
11305                 case PORT_FEATURE_LINK_SPEED_AUTO:
11306                         if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
11307                                 bp->link_params.req_line_speed[idx] =
11308                                         SPEED_AUTO_NEG;
11309                                 bp->port.advertising[idx] |=
11310                                         bp->port.supported[idx];
11311                                 if (bp->link_params.phy[EXT_PHY1].type ==
11312                                     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
11313                                         bp->port.advertising[idx] |=
11314                                         (SUPPORTED_100baseT_Half |
11315                                          SUPPORTED_100baseT_Full);
11316                         } else {
11317                                 /* force 10G, no AN */
11318                                 bp->link_params.req_line_speed[idx] =
11319                                         SPEED_10000;
11320                                 bp->port.advertising[idx] |=
11321                                         (ADVERTISED_10000baseT_Full |
11322                                          ADVERTISED_FIBRE);
11323                                 continue;
11324                         }
11325                         break;
11326
11327                 case PORT_FEATURE_LINK_SPEED_10M_FULL:
11328                         if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
11329                                 bp->link_params.req_line_speed[idx] =
11330                                         SPEED_10;
11331                                 bp->port.advertising[idx] |=
11332                                         (ADVERTISED_10baseT_Full |
11333                                          ADVERTISED_TP);
11334                         } else {
11335                                 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
11336                                             link_config,
11337                                     bp->link_params.speed_cap_mask[idx]);
11338                                 return;
11339                         }
11340                         break;
11341
11342                 case PORT_FEATURE_LINK_SPEED_10M_HALF:
11343                         if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
11344                                 bp->link_params.req_line_speed[idx] =
11345                                         SPEED_10;
11346                                 bp->link_params.req_duplex[idx] =
11347                                         DUPLEX_HALF;
11348                                 bp->port.advertising[idx] |=
11349                                         (ADVERTISED_10baseT_Half |
11350                                          ADVERTISED_TP);
11351                         } else {
11352                                 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
11353                                             link_config,
11354                                           bp->link_params.speed_cap_mask[idx]);
11355                                 return;
11356                         }
11357                         break;
11358
11359                 case PORT_FEATURE_LINK_SPEED_100M_FULL:
11360                         if (bp->port.supported[idx] &
11361                             SUPPORTED_100baseT_Full) {
11362                                 bp->link_params.req_line_speed[idx] =
11363                                         SPEED_100;
11364                                 bp->port.advertising[idx] |=
11365                                         (ADVERTISED_100baseT_Full |
11366                                          ADVERTISED_TP);
11367                         } else {
11368                                 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
11369                                             link_config,
11370                                           bp->link_params.speed_cap_mask[idx]);
11371                                 return;
11372                         }
11373                         break;
11374
11375                 case PORT_FEATURE_LINK_SPEED_100M_HALF:
11376                         if (bp->port.supported[idx] &
11377                             SUPPORTED_100baseT_Half) {
11378                                 bp->link_params.req_line_speed[idx] =
11379                                                                 SPEED_100;
11380                                 bp->link_params.req_duplex[idx] =
11381                                                                 DUPLEX_HALF;
11382                                 bp->port.advertising[idx] |=
11383                                         (ADVERTISED_100baseT_Half |
11384                                          ADVERTISED_TP);
11385                         } else {
11386                                 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
11387                                     link_config,
11388                                     bp->link_params.speed_cap_mask[idx]);
11389                                 return;
11390                         }
11391                         break;
11392
11393                 case PORT_FEATURE_LINK_SPEED_1G:
11394                         if (bp->port.supported[idx] &
11395                             SUPPORTED_1000baseT_Full) {
11396                                 bp->link_params.req_line_speed[idx] =
11397                                         SPEED_1000;
11398                                 bp->port.advertising[idx] |=
11399                                         (ADVERTISED_1000baseT_Full |
11400                                          ADVERTISED_TP);
11401                         } else if (bp->port.supported[idx] &
11402                                    SUPPORTED_1000baseKX_Full) {
11403                                 bp->link_params.req_line_speed[idx] =
11404                                         SPEED_1000;
11405                                 bp->port.advertising[idx] |=
11406                                         ADVERTISED_1000baseKX_Full;
11407                         } else {
11408                                 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
11409                                     link_config,
11410                                     bp->link_params.speed_cap_mask[idx]);
11411                                 return;
11412                         }
11413                         break;
11414
11415                 case PORT_FEATURE_LINK_SPEED_2_5G:
11416                         if (bp->port.supported[idx] &
11417                             SUPPORTED_2500baseX_Full) {
11418                                 bp->link_params.req_line_speed[idx] =
11419                                         SPEED_2500;
11420                                 bp->port.advertising[idx] |=
11421                                         (ADVERTISED_2500baseX_Full |
11422                                                 ADVERTISED_TP);
11423                         } else {
11424                                 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
11425                                     link_config,
11426                                     bp->link_params.speed_cap_mask[idx]);
11427                                 return;
11428                         }
11429                         break;
11430
11431                 case PORT_FEATURE_LINK_SPEED_10G_CX4:
11432                         if (bp->port.supported[idx] &
11433                             SUPPORTED_10000baseT_Full) {
11434                                 bp->link_params.req_line_speed[idx] =
11435                                         SPEED_10000;
11436                                 bp->port.advertising[idx] |=
11437                                         (ADVERTISED_10000baseT_Full |
11438                                                 ADVERTISED_FIBRE);
11439                         } else if (bp->port.supported[idx] &
11440                                    SUPPORTED_10000baseKR_Full) {
11441                                 bp->link_params.req_line_speed[idx] =
11442                                         SPEED_10000;
11443                                 bp->port.advertising[idx] |=
11444                                         (ADVERTISED_10000baseKR_Full |
11445                                                 ADVERTISED_FIBRE);
11446                         } else {
11447                                 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
11448                                     link_config,
11449                                     bp->link_params.speed_cap_mask[idx]);
11450                                 return;
11451                         }
11452                         break;
11453                 case PORT_FEATURE_LINK_SPEED_20G:
11454                         bp->link_params.req_line_speed[idx] = SPEED_20000;
11455
11456                         break;
11457                 default:
11458                         BNX2X_ERR("NVRAM config error. BAD link speed link_config 0x%x\n",
11459                                   link_config);
11460                         bp->link_params.req_line_speed[idx] =
11461                                 SPEED_AUTO_NEG;
11462                         bp->port.advertising[idx] =
11463                                 bp->port.supported[idx];
11464                         break;
11465                 }
11466
11467                 bp->link_params.req_flow_ctrl[idx] = (link_config &
11468                                          PORT_FEATURE_FLOW_CONTROL_MASK);
11469                 if (bp->link_params.req_flow_ctrl[idx] ==
11470                     BNX2X_FLOW_CTRL_AUTO) {
11471                         if (!(bp->port.supported[idx] & SUPPORTED_Autoneg))
11472                                 bp->link_params.req_flow_ctrl[idx] =
11473                                                         BNX2X_FLOW_CTRL_NONE;
11474                         else
11475                                 bnx2x_set_requested_fc(bp);
11476                 }
11477
11478                 BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d req_flow_ctrl 0x%x advertising 0x%x\n",
11479                                bp->link_params.req_line_speed[idx],
11480                                bp->link_params.req_duplex[idx],
11481                                bp->link_params.req_flow_ctrl[idx],
11482                                bp->port.advertising[idx]);
11483         }
11484 }
11485
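      /* Compose a 6-byte MAC address from the 16-bit upper and 32-bit lower
       * words as stored in shmem/MF config (network byte order).
       */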
11486 static void bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
11487 {
11488         __be16 mac_hi_be = cpu_to_be16(mac_hi);
11489         __be32 mac_lo_be = cpu_to_be32(mac_lo);
11490         memcpy(mac_buf, &mac_hi_be, sizeof(mac_hi_be));
11491         memcpy(mac_buf + sizeof(mac_hi_be), &mac_lo_be, sizeof(mac_lo_be));
11492 }
11493
11494 static void bnx2x_get_port_hwinfo(struct bnx2x *bp)
11495 {
11496         int port = BP_PORT(bp);
11497         u32 config;
11498         u32 ext_phy_type, ext_phy_config, eee_mode;
11499
11500         bp->link_params.bp = bp;
11501         bp->link_params.port = port;
11502
11503         bp->link_params.lane_config =
11504                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
11505
11506         bp->link_params.speed_cap_mask[0] =
11507                 SHMEM_RD(bp,
11508                          dev_info.port_hw_config[port].speed_capability_mask) &
11509                 PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
11510         bp->link_params.speed_cap_mask[1] =
11511                 SHMEM_RD(bp,
11512                          dev_info.port_hw_config[port].speed_capability_mask2) &
11513                 PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
11514         bp->port.link_config[0] =
11515                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
11516
11517         bp->port.link_config[1] =
11518                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
11519
11520         bp->link_params.multi_phy_config =
11521                 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
11522         /* If the device is capable of WoL, set the default state according
11523          * to the HW
11524          */
11525         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
11526         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
11527                    (config & PORT_FEATURE_WOL_ENABLED));
11528
11529         if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
11530             PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE && !IS_MF(bp))
11531                 bp->flags |= NO_ISCSI_FLAG;
11532         if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
11533             PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI && !(IS_MF(bp)))
11534                 bp->flags |= NO_FCOE_FLAG;
11535
11536         BNX2X_DEV_INFO("lane_config 0x%08x  speed_cap_mask0 0x%08x  link_config0 0x%08x\n",
11537                        bp->link_params.lane_config,
11538                        bp->link_params.speed_cap_mask[0],
11539                        bp->port.link_config[0]);
11540
11541         bp->link_params.switch_cfg = (bp->port.link_config[0] &
11542                                       PORT_FEATURE_CONNECTED_SWITCH_MASK);
11543         bnx2x_phy_probe(&bp->link_params);
11544         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
11545
11546         bnx2x_link_settings_requested(bp);
11547
11548         /*
11549          * If connected directly, work with the internal PHY, otherwise, work
11550          * with the external PHY
11551          */
11552         ext_phy_config =
11553                 SHMEM_RD(bp,
11554                          dev_info.port_hw_config[port].external_phy_config);
11555         ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
11556         if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
11557                 bp->mdio.prtad = bp->port.phy_addr;
11558
11559         else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
11560                  (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
11561                 bp->mdio.prtad =
11562                         XGXS_EXT_PHY_ADDR(ext_phy_config);
11563
11564         /* Configure link feature according to nvram value */
11565         eee_mode = (((SHMEM_RD(bp, dev_info.
11566                       port_feature_config[port].eee_power_mode)) &
11567                      PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
11568                     PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
11569         if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
11570                 bp->link_params.eee_mode = EEE_MODE_ADV_LPI |
11571                                            EEE_MODE_ENABLE_LPI |
11572                                            EEE_MODE_OUTPUT_TIME;
11573         } else {
11574                 bp->link_params.eee_mode = 0;
11575         }
11576 }
11577
11578 void bnx2x_get_iscsi_info(struct bnx2x *bp)
11579 {
11580         u32 no_flags = NO_ISCSI_FLAG;
11581         int port = BP_PORT(bp);
11582         u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
11583                                 drv_lic_key[port].max_iscsi_conn);
11584
11585         if (!CNIC_SUPPORT(bp)) {
11586                 bp->flags |= no_flags;
11587                 return;
11588         }
11589
11590         /* Get the number of maximum allowed iSCSI connections */
11591         bp->cnic_eth_dev.max_iscsi_conn =
11592                 (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
11593                 BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
11594
11595         BNX2X_DEV_INFO("max_iscsi_conn 0x%x\n",
11596                        bp->cnic_eth_dev.max_iscsi_conn);
11597
11598         /*
11599          * If maximum allowed number of connections is zero -
11600          * disable the feature.
11601          */
11602         if (!bp->cnic_eth_dev.max_iscsi_conn)
11603                 bp->flags |= no_flags;
11604 }
11605
11606 static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
11607 {
11608         /* Port info */
11609         bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
11610                 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_upper);
11611         bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
11612                 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_lower);
11613
11614         /* Node info */
11615         bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
11616                 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_upper);
11617         bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
11618                 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower);
11619 }
11620
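      /* Count how many functions on this path (or ports, in single-function
       * mode) have FCoE enabled; used to split the FCoE exchange resources
       * between them.
       */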
11621 static int bnx2x_shared_fcoe_funcs(struct bnx2x *bp)
11622 {
11623         u8 count = 0;
11624
11625         if (IS_MF(bp)) {
11626                 u8 fid;
11627
11628                 /* iterate over absolute function ids for this path: */
11629                 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX * 2; fid += 2) {
11630                         if (IS_MF_SD(bp)) {
11631                                 u32 cfg = MF_CFG_RD(bp,
11632                                                     func_mf_config[fid].config);
11633
11634                                 if (!(cfg & FUNC_MF_CFG_FUNC_HIDE) &&
11635                                     ((cfg & FUNC_MF_CFG_PROTOCOL_MASK) ==
11636                                             FUNC_MF_CFG_PROTOCOL_FCOE))
11637                                         count++;
11638                         } else {
11639                                 u32 cfg = MF_CFG_RD(bp,
11640                                                     func_ext_config[fid].
11641                                                                       func_cfg);
11642
11643                                 if ((cfg & MACP_FUNC_CFG_FLAGS_ENABLED) &&
11644                                     (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD))
11645                                         count++;
11646                         }
11647                 }
11648         } else { /* SF */
11649                 int port, port_cnt = CHIP_MODE_IS_4_PORT(bp) ? 2 : 1;
11650
11651                 for (port = 0; port < port_cnt; port++) {
11652                         u32 lic = SHMEM_RD(bp,
11653                                            drv_lic_key[port].max_fcoe_conn) ^
11654                                   FW_ENCODE_32BIT_PATTERN;
11655                         if (lic)
11656                                 count++;
11657                 }
11658         }
11659
11660         return count;
11661 }
11662
11663 static void bnx2x_get_fcoe_info(struct bnx2x *bp)
11664 {
11665         int port = BP_PORT(bp);
11666         int func = BP_ABS_FUNC(bp);
11667         u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
11668                                 drv_lic_key[port].max_fcoe_conn);
11669         u8 num_fcoe_func = bnx2x_shared_fcoe_funcs(bp);
11670
11671         if (!CNIC_SUPPORT(bp)) {
11672                 bp->flags |= NO_FCOE_FLAG;
11673                 return;
11674         }
11675
11676         /* Get the number of maximum allowed FCoE connections */
11677         bp->cnic_eth_dev.max_fcoe_conn =
11678                 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
11679                 BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
11680
11681         /* Calculate the number of maximum allowed FCoE tasks */
11682         bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE;
11683
11684         /* check if FCoE resources must be shared between different functions */
11685         if (num_fcoe_func)
11686                 bp->cnic_eth_dev.max_fcoe_exchanges /= num_fcoe_func;
11687
11688         /* Read the WWN: */
11689         if (!IS_MF(bp)) {
11690                 /* Port info */
11691                 bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
11692                         SHMEM_RD(bp,
11693                                  dev_info.port_hw_config[port].
11694                                  fcoe_wwn_port_name_upper);
11695                 bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
11696                         SHMEM_RD(bp,
11697                                  dev_info.port_hw_config[port].
11698                                  fcoe_wwn_port_name_lower);
11699
11700                 /* Node info */
11701                 bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
11702                         SHMEM_RD(bp,
11703                                  dev_info.port_hw_config[port].
11704                                  fcoe_wwn_node_name_upper);
11705                 bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
11706                         SHMEM_RD(bp,
11707                                  dev_info.port_hw_config[port].
11708                                  fcoe_wwn_node_name_lower);
11709         } else if (!IS_MF_SD(bp)) {
11710                 /* Read the WWN info only if the FCoE feature is enabled for
11711                  * this function.
11712                  */
11713                 if (BNX2X_HAS_MF_EXT_PROTOCOL_FCOE(bp))
11714                         bnx2x_get_ext_wwn_info(bp, func);
11715         } else {
11716                 if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp))
11717                         bnx2x_get_ext_wwn_info(bp, func);
11718         }
11719
11720         BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn);
11721
11722         /*
11723          * If maximum allowed number of connections is zero -
11724          * disable the feature.
11725          */
11726         if (!bp->cnic_eth_dev.max_fcoe_conn)
11727                 bp->flags |= NO_FCOE_FLAG;
11728 }
11729
11730 static void bnx2x_get_cnic_info(struct bnx2x *bp)
11731 {
11732         /*
11733          * iSCSI may be dynamically disabled, but reading the
11734          * info here lets the driver decrease its memory usage
11735          * if the feature is disabled for good.
11736          */
11737         bnx2x_get_iscsi_info(bp);
11738         bnx2x_get_fcoe_info(bp);
11739 }
11740
11741 static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
11742 {
11743         u32 val, val2;
11744         int func = BP_ABS_FUNC(bp);
11745         int port = BP_PORT(bp);
11746         u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
11747         u8 *fip_mac = bp->fip_mac;
11748
11749         if (IS_MF(bp)) {
11750                 /* iSCSI and FCoE NPAR MACs: if there is neither an iSCSI nor
11751                  * an FCoE MAC then the appropriate feature should be disabled.
11752                  * In non-SD mode the feature configuration comes from struct
11753                  * func_ext_config.
11754                  */
11755                 if (!IS_MF_SD(bp)) {
11756                         u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
11757                         if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
11758                                 val2 = MF_CFG_RD(bp, func_ext_config[func].
11759                                                  iscsi_mac_addr_upper);
11760                                 val = MF_CFG_RD(bp, func_ext_config[func].
11761                                                 iscsi_mac_addr_lower);
11762                                 bnx2x_set_mac_buf(iscsi_mac, val, val2);
11763                                 BNX2X_DEV_INFO
11764                                         ("Read iSCSI MAC: %pM\n", iscsi_mac);
11765                         } else {
11766                                 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
11767                         }
11768
11769                         if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
11770                                 val2 = MF_CFG_RD(bp, func_ext_config[func].
11771                                                  fcoe_mac_addr_upper);
11772                                 val = MF_CFG_RD(bp, func_ext_config[func].
11773                                                 fcoe_mac_addr_lower);
11774                                 bnx2x_set_mac_buf(fip_mac, val, val2);
11775                                 BNX2X_DEV_INFO
11776                                         ("Read FCoE L2 MAC: %pM\n", fip_mac);
11777                         } else {
11778                                 bp->flags |= NO_FCOE_FLAG;
11779                         }
11780
11781                         bp->mf_ext_config = cfg;
11782
11783                 } else { /* SD MODE */
11784                         if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
11785                                 /* use primary mac as iscsi mac */
11786                                 memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN);
11787
11788                                 BNX2X_DEV_INFO("SD ISCSI MODE\n");
11789                                 BNX2X_DEV_INFO
11790                                         ("Read iSCSI MAC: %pM\n", iscsi_mac);
11791                         } else if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
11792                                 /* use primary mac as fip mac */
11793                                 memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
11794                                 BNX2X_DEV_INFO("SD FCoE MODE\n");
11795                                 BNX2X_DEV_INFO
11796                                         ("Read FIP MAC: %pM\n", fip_mac);
11797                         }
11798                 }
11799
11800                 /* If this is a storage-only interface, use SAN mac as
11801                  * primary MAC. Notice that for SD this is already the case,
11802                  * as the SAN mac was copied from the primary MAC.
11803                  */
11804                 if (IS_MF_FCOE_AFEX(bp))
11805                         memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
11806         } else {
11807                 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
11808                                 iscsi_mac_upper);
11809                 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
11810                                iscsi_mac_lower);
11811                 bnx2x_set_mac_buf(iscsi_mac, val, val2);
11812
11813                 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
11814                                 fcoe_fip_mac_upper);
11815                 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
11816                                fcoe_fip_mac_lower);
11817                 bnx2x_set_mac_buf(fip_mac, val, val2);
11818         }
11819
11820         /* Disable iSCSI OOO if MAC configuration is invalid. */
11821         if (!is_valid_ether_addr(iscsi_mac)) {
11822                 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
11823                 eth_zero_addr(iscsi_mac);
11824         }
11825
11826         /* Disable FCoE if MAC configuration is invalid. */
11827         if (!is_valid_ether_addr(fip_mac)) {
11828                 bp->flags |= NO_FCOE_FLAG;
11829                 eth_zero_addr(bp->fip_mac);
11830         }
11831 }
11832
11833 static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
11834 {
11835         u32 val, val2;
11836         int func = BP_ABS_FUNC(bp);
11837         int port = BP_PORT(bp);
11838
11839         /* Zero primary MAC configuration */
11840         eth_zero_addr(bp->dev->dev_addr);
11841
11842         if (BP_NOMCP(bp)) {
11843                 BNX2X_ERROR("warning: random MAC workaround active\n");
11844                 eth_hw_addr_random(bp->dev);
11845         } else if (IS_MF(bp)) {
11846                 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
11847                 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
11848                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
11849                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
11850                         bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
11851
11852                 if (CNIC_SUPPORT(bp))
11853                         bnx2x_get_cnic_mac_hwinfo(bp);
11854         } else {
11855                 /* in SF read MACs from port configuration */
11856                 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
11857                 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
11858                 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
11859
11860                 if (CNIC_SUPPORT(bp))
11861                         bnx2x_get_cnic_mac_hwinfo(bp);
11862         }
11863
11864         if (!BP_NOMCP(bp)) {
11865                 /* Read physical port identifier from shmem */
11866                 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
11867                 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
11868                 bnx2x_set_mac_buf(bp->phys_port_id, val, val2);
11869                 bp->flags |= HAS_PHYS_PORT_ID;
11870         }
11871
11872         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
11873
11874         if (!is_valid_ether_addr(bp->dev->dev_addr))
11875                 dev_err(&bp->pdev->dev,
11876                         "bad Ethernet MAC address configuration: %pM\n"
11877                         "change it manually before bringing up the appropriate network interface\n",
11878                         bp->dev->dev_addr);
11879 }
11880
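      /* Report whether 'pause on host ring' (dropless flow control) is
       * enabled in NVRAM: per function on non-E1x multi-function setups,
       * per port otherwise.  Never enabled for VFs.
       */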
11881 static bool bnx2x_get_dropless_info(struct bnx2x *bp)
11882 {
11883         int tmp;
11884         u32 cfg;
11885
11886         if (IS_VF(bp))
11887                 return false;
11888
11889         if (IS_MF(bp) && !CHIP_IS_E1x(bp)) {
11890                 /* Take function: tmp = func */
11891                 tmp = BP_ABS_FUNC(bp);
11892                 cfg = MF_CFG_RD(bp, func_ext_config[tmp].func_cfg);
11893                 cfg = !!(cfg & MACP_FUNC_CFG_PAUSE_ON_HOST_RING);
11894         } else {
11895                 /* Take port: tmp = port */
11896                 tmp = BP_PORT(bp);
11897                 cfg = SHMEM_RD(bp,
11898                                dev_info.port_hw_config[tmp].generic_features);
11899                 cfg = !!(cfg & PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED);
11900         }
11901         return cfg;
11902 }
11903
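      /* Enter switch-independent (SI) multi-function mode only if the MF
       * configuration holds a valid MAC upper-half; otherwise just log it.
       */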
11904 static void validate_set_si_mode(struct bnx2x *bp)
11905 {
11906         u8 func = BP_ABS_FUNC(bp);
11907         u32 val;
11908
11909         val = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
11910
11911         /* check for legal mac (upper bytes) */
11912         if (val != 0xffff) {
11913                 bp->mf_mode = MULTI_FUNCTION_SI;
11914                 bp->mf_config[BP_VN(bp)] =
11915                         MF_CFG_RD(bp, func_mf_config[func].config);
11916         } else
11917                 BNX2X_DEV_INFO("illegal MAC address for SI\n");
11918 }
11919
11920 static int bnx2x_get_hwinfo(struct bnx2x *bp)
11921 {
11922         int /*abs*/func = BP_ABS_FUNC(bp);
11923         int vn, mfw_vn;
11924         u32 val = 0, val2 = 0;
11925         int rc = 0;
11926
11927         /* Validate that chip access is feasible */
11928         if (REG_RD(bp, MISC_REG_CHIP_NUM) == 0xffffffff) {
11929                 dev_err(&bp->pdev->dev,
11930                         "Chip read returns all Fs. Preventing probe from continuing\n");
11931                 return -EINVAL;
11932         }
11933
11934         bnx2x_get_common_hwinfo(bp);
11935
11936         /*
11937          * initialize IGU parameters
11938          */
11939         if (CHIP_IS_E1x(bp)) {
11940                 bp->common.int_block = INT_BLOCK_HC;
11941
11942                 bp->igu_dsb_id = DEF_SB_IGU_ID;
11943                 bp->igu_base_sb = 0;
11944         } else {
11945                 bp->common.int_block = INT_BLOCK_IGU;
11946
11947                 /* do not allow device reset during IGU info processing */
11948                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
11949
11950                 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
11951
11952                 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
11953                         int tout = 5000;
11954
11955                         BNX2X_DEV_INFO("FORCING Normal Mode\n");
11956
11957                         val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
11958                         REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val);
11959                         REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f);
11960
11961                         while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
11962                                 tout--;
11963                                 usleep_range(1000, 2000);
11964                         }
11965
11966                         if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
11967                                 dev_err(&bp->pdev->dev,
11968                                         "FORCING Normal Mode failed!!!\n");
11969                                 bnx2x_release_hw_lock(bp,
11970                                                       HW_LOCK_RESOURCE_RESET);
11971                                 return -EPERM;
11972                         }
11973                 }
11974
11975                 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
11976                         BNX2X_DEV_INFO("IGU Backward Compatible Mode\n");
11977                         bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
11978                 } else
11979                         BNX2X_DEV_INFO("IGU Normal Mode\n");
11980
11981                 rc = bnx2x_get_igu_cam_info(bp);
11982                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
11983                 if (rc)
11984                         return rc;
11985         }
11986
11987         /*
11988          * Set the base FW non-default (fast-path) status block ID; this value
11989          * is used to initialize the fw_sb_id saved in the fp/queue structure,
11990          * which determines the ID used by the FW.
11991          */
11992         if (CHIP_IS_E1x(bp))
11993                 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp);
11994         else /*
11995               * 57712 - we currently use one FW SB per IGU SB (Rx and Tx of
11996               * the same queue are indicated on the same IGU SB). So we prefer
11997               * FW and IGU SBs to be the same value.
11998               */
11999                 bp->base_fw_ndsb = bp->igu_base_sb;
12000
12001         BNX2X_DEV_INFO("igu_dsb_id %d  igu_base_sb %d  igu_sb_cnt %d\n"
12002                        "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb,
12003                        bp->igu_sb_cnt, bp->base_fw_ndsb);
12004
12005         /*
12006          * Initialize MF configuration
12007          */
12008
12009         bp->mf_ov = 0;
12010         bp->mf_mode = 0;
12011         bp->mf_sub_mode = 0;
12012         vn = BP_VN(bp);
12013         mfw_vn = BP_FW_MB_IDX(bp);
12014
12015         if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
12016                 BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
12017                                bp->common.shmem2_base, SHMEM2_RD(bp, size),
12018                               (u32)offsetof(struct shmem2_region, mf_cfg_addr));
12019
12020                 if (SHMEM2_HAS(bp, mf_cfg_addr))
12021                         bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
12022                 else
12023                         bp->common.mf_cfg_base = bp->common.shmem_base +
12024                                 offsetof(struct shmem_region, func_mb) +
12025                                 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
12026                 /*
12027                  * get mf configuration:
12028                  * 1. Existence of MF configuration
12029                  * 2. MAC address must be legal (check only upper bytes)
12030                  *    for  Switch-Independent mode;
12031                  *    OVLAN must be legal for Switch-Dependent mode
12032                  * 3. SF_MODE configures specific MF mode
12033                  */
12034                 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
12035                         /* get mf configuration */
12036                         val = SHMEM_RD(bp,
12037                                        dev_info.shared_feature_config.config);
12038                         val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
12039
12040                         switch (val) {
12041                         case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
12042                                 validate_set_si_mode(bp);
12043                                 break;
12044                         case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
12045                                 if ((!CHIP_IS_E1x(bp)) &&
12046                                     (MF_CFG_RD(bp, func_mf_config[func].
12047                                                mac_upper) != 0xffff) &&
12048                                     (SHMEM2_HAS(bp,
12049                                                 afex_driver_support))) {
12050                                         bp->mf_mode = MULTI_FUNCTION_AFEX;
12051                                         bp->mf_config[vn] = MF_CFG_RD(bp,
12052                                                 func_mf_config[func].config);
12053                                 } else {
12054                                         BNX2X_DEV_INFO("cannot configure AFEX mode\n");
12055                                 }
12056                                 break;
12057                         case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
12058                                 /* get OV configuration */
12059                                 val = MF_CFG_RD(bp,
12060                                         func_mf_config[FUNC_0].e1hov_tag);
12061                                 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
12062
12063                                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
12064                                         bp->mf_mode = MULTI_FUNCTION_SD;
12065                                         bp->mf_config[vn] = MF_CFG_RD(bp,
12066                                                 func_mf_config[func].config);
12067                                 } else
12068                                         BNX2X_DEV_INFO("illegal OV for SD\n");
12069                                 break;
12070                         case SHARED_FEAT_CFG_FORCE_SF_MODE_BD_MODE:
12071                                 bp->mf_mode = MULTI_FUNCTION_SD;
12072                                 bp->mf_sub_mode = SUB_MF_MODE_BD;
12073                                 bp->mf_config[vn] =
12074                                         MF_CFG_RD(bp,
12075                                                   func_mf_config[func].config);
12076
12077                                 if (SHMEM2_HAS(bp, mtu_size)) {
12078                                         int mtu_idx = BP_FW_MB_IDX(bp);
12079                                         u16 mtu_size;
12080                                         u32 mtu;
12081
12082                                         mtu = SHMEM2_RD(bp, mtu_size[mtu_idx]);
12083                                         mtu_size = (u16)mtu;
12084                                         DP(NETIF_MSG_IFUP, "Read MTU size %04x [%08x]\n",
12085                                            mtu_size, mtu);
12086
12087                                         /* if valid: update device mtu */
12088                                         if (((mtu_size + ETH_HLEN) >=
12089                                              ETH_MIN_PACKET_SIZE) &&
12090                                             (mtu_size <=
12091                                              ETH_MAX_JUMBO_PACKET_SIZE))
12092                                                 bp->dev->mtu = mtu_size;
12093                                 }
12094                                 break;
12095                         case SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE:
12096                                 bp->mf_mode = MULTI_FUNCTION_SD;
12097                                 bp->mf_sub_mode = SUB_MF_MODE_UFP;
12098                                 bp->mf_config[vn] =
12099                                         MF_CFG_RD(bp,
12100                                                   func_mf_config[func].config);
12101                                 break;
12102                         case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
12103                                 bp->mf_config[vn] = 0;
12104                                 break;
12105                         case SHARED_FEAT_CFG_FORCE_SF_MODE_EXTENDED_MODE:
12106                                 val2 = SHMEM_RD(bp,
12107                                         dev_info.shared_hw_config.config_3);
12108                                 val2 &= SHARED_HW_CFG_EXTENDED_MF_MODE_MASK;
12109                                 switch (val2) {
12110                                 case SHARED_HW_CFG_EXTENDED_MF_MODE_NPAR1_DOT_5:
12111                                         validate_set_si_mode(bp);
12112                                         bp->mf_sub_mode =
12113                                                         SUB_MF_MODE_NPAR1_DOT_5;
12114                                         break;
12115                                 default:
12116                                         /* Unknown configuration */
12117                                         bp->mf_config[vn] = 0;
12118                                         BNX2X_DEV_INFO("unknown extended MF mode 0x%x\n",
12119                                                        val2);
12120                                 }
12121                                 break;
12122                         default:
12123                                 /* Unknown configuration: reset mf_config */
12124                                 bp->mf_config[vn] = 0;
12125                                 BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val);
12126                         }
12127                 }
12128
12129                 BNX2X_DEV_INFO("%s function mode\n",
12130                                IS_MF(bp) ? "multi" : "single");
12131
12132                 switch (bp->mf_mode) {
12133                 case MULTI_FUNCTION_SD:
12134                         val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
12135                               FUNC_MF_CFG_E1HOV_TAG_MASK;
12136                         if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
12137                                 bp->mf_ov = val;
12138                                 bp->path_has_ovlan = true;
12139
12140                                 BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n",
12141                                                func, bp->mf_ov, bp->mf_ov);
12142                         } else if ((bp->mf_sub_mode == SUB_MF_MODE_UFP) ||
12143                                    (bp->mf_sub_mode == SUB_MF_MODE_BD)) {
12144                                 dev_err(&bp->pdev->dev,
12145                                         "Unexpected - no valid MF OV for func %d in UFP/BD mode\n",
12146                                         func);
12147                                 bp->path_has_ovlan = true;
12148                         } else {
12149                                 dev_err(&bp->pdev->dev,
12150                                         "No valid MF OV for func %d, aborting\n",
12151                                         func);
12152                                 return -EPERM;
12153                         }
12154                         break;
12155                 case MULTI_FUNCTION_AFEX:
12156                         BNX2X_DEV_INFO("func %d is in MF afex mode\n", func);
12157                         break;
12158                 case MULTI_FUNCTION_SI:
12159                         BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n",
12160                                        func);
12161                         break;
12162                 default:
12163                         if (vn) {
12164                                 dev_err(&bp->pdev->dev,
12165                                         "VN %d is in a single function mode, aborting\n",
12166                                         vn);
12167                                 return -EPERM;
12168                         }
12169                         break;
12170                 }
12171
12172                 /* Check whether the other port on the path needs OVLAN:
12173                  * since the MF configuration is shared between the ports,
12174                  * the only possible mixed modes are
12175                  * {SF, SI} {SF, SD} {SD, SF} {SI, SF}
12176                  */
12177                 if (CHIP_MODE_IS_4_PORT(bp) &&
12178                     !bp->path_has_ovlan &&
12179                     !IS_MF(bp) &&
12180                     bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
12181                         u8 other_port = !BP_PORT(bp);
12182                         u8 other_func = BP_PATH(bp) + 2*other_port;
12183                         val = MF_CFG_RD(bp,
12184                                         func_mf_config[other_func].e1hov_tag);
12185                         if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
12186                                 bp->path_has_ovlan = true;
12187                 }
12188         }
12189
12190         /* adjust igu_sb_cnt to MF for E1H */
12191         if (CHIP_IS_E1H(bp) && IS_MF(bp))
12192                 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT);
12193
12194         /* port info */
12195         bnx2x_get_port_hwinfo(bp);
12196
12197         /* Get MAC addresses */
12198         bnx2x_get_mac_hwinfo(bp);
12199
12200         bnx2x_get_cnic_info(bp);
12201
12202         return rc;
12203 }
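
/* Editorial recap of the SHARED_FEAT_CFG_FORCE_SF_MODE_* switch above:
 *
 *	SWITCH_INDEPT -> MULTI_FUNCTION_SI (mac_upper must be legal)
 *	AFEX_MODE     -> MULTI_FUNCTION_AFEX (E2+ with afex_driver_support)
 *	MF_ALLOWED    -> MULTI_FUNCTION_SD (OVLAN tag must be legal)
 *	BD_MODE       -> MULTI_FUNCTION_SD, mf_sub_mode = SUB_MF_MODE_BD
 *	UFP_MODE      -> MULTI_FUNCTION_SD, mf_sub_mode = SUB_MF_MODE_UFP
 *	FORCED_SF     -> single function (mf_config[vn] cleared)
 *	EXTENDED_MODE -> NPAR1.5 maps to SI with SUB_MF_MODE_NPAR1_DOT_5
 */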
12204
12205 static void bnx2x_read_fwinfo(struct bnx2x *bp)
12206 {
12207         int cnt, i, block_end, rodi;
12208         char vpd_start[BNX2X_VPD_LEN+1];
12209         char str_id_reg[VENDOR_ID_LEN+1];
12210         char str_id_cap[VENDOR_ID_LEN+1];
12211         char *vpd_data;
12212         char *vpd_extended_data = NULL;
12213         u8 len;
12214
12215         cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start);
12216         memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
12217
12218         if (cnt < BNX2X_VPD_LEN)
12219                 goto out_not_found;
12220
12221         /* VPD RO tag should be first tag after identifier string, hence
12222          * we should be able to find it in first BNX2X_VPD_LEN chars
12223          */
12224         i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN,
12225                              PCI_VPD_LRDT_RO_DATA);
12226         if (i < 0)
12227                 goto out_not_found;
12228
12229         block_end = i + PCI_VPD_LRDT_TAG_SIZE +
12230                     pci_vpd_lrdt_size(&vpd_start[i]);
12231
12232         i += PCI_VPD_LRDT_TAG_SIZE;
12233
12234         if (block_end > BNX2X_VPD_LEN) {
12235                 vpd_extended_data = kmalloc(block_end, GFP_KERNEL);
12236                 if (!vpd_extended_data)
12237                         goto out_not_found;
12238
12239                 /* read rest of vpd image into vpd_extended_data */
12240                 memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN);
12241                 cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN,
12242                                    block_end - BNX2X_VPD_LEN,
12243                                    vpd_extended_data + BNX2X_VPD_LEN);
12244                 if (cnt < (block_end - BNX2X_VPD_LEN))
12245                         goto out_not_found;
12246                 vpd_data = vpd_extended_data;
12247         } else
12248                 vpd_data = vpd_start;
12249
12250         /* now vpd_data holds full vpd content in both cases */
12251
12252         rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
12253                                    PCI_VPD_RO_KEYWORD_MFR_ID);
12254         if (rodi < 0)
12255                 goto out_not_found;
12256
12257         len = pci_vpd_info_field_size(&vpd_data[rodi]);
12258
12259         if (len != VENDOR_ID_LEN)
12260                 goto out_not_found;
12261
12262         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
12263
12264         /* vendor specific info */
12265         snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
12266         snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
12267         if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
12268             !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
12269
12270                 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
12271                                                 PCI_VPD_RO_KEYWORD_VENDOR0);
12272                 if (rodi >= 0) {
12273                         len = pci_vpd_info_field_size(&vpd_data[rodi]);
12274
12275                         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
12276
12277                         if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
12278                                 memcpy(bp->fw_ver, &vpd_data[rodi], len);
12279                                 bp->fw_ver[len] = ' ';
12280                         }
12281                 }
12282                 kfree(vpd_extended_data);
12283                 return;
12284         }
12285 out_not_found:
12286         kfree(vpd_extended_data);
12287         return;
12288 }
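
/* A minimal editorial sketch, using the same VPD helpers as above, of how an
 * additional read-only keyword (e.g. the part number) could be pulled from the
 * same buffer once vpd_data, i and block_end have been set up:
 *
 *	int pn = pci_vpd_find_info_keyword(vpd_data, i, block_end,
 *					   PCI_VPD_RO_KEYWORD_PARTNO);
 *	if (pn >= 0) {
 *		u8 pn_len = pci_vpd_info_field_size(&vpd_data[pn]);
 *
 *		pn += PCI_VPD_INFO_FLD_HDR_SIZE;
 *		vpd_data[pn] .. vpd_data[pn + pn_len - 1] now holds the part number
 *	}
 */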
12289
12290 static void bnx2x_set_modes_bitmap(struct bnx2x *bp)
12291 {
12292         u32 flags = 0;
12293
12294         if (CHIP_REV_IS_FPGA(bp))
12295                 SET_FLAGS(flags, MODE_FPGA);
12296         else if (CHIP_REV_IS_EMUL(bp))
12297                 SET_FLAGS(flags, MODE_EMUL);
12298         else
12299                 SET_FLAGS(flags, MODE_ASIC);
12300
12301         if (CHIP_MODE_IS_4_PORT(bp))
12302                 SET_FLAGS(flags, MODE_PORT4);
12303         else
12304                 SET_FLAGS(flags, MODE_PORT2);
12305
12306         if (CHIP_IS_E2(bp))
12307                 SET_FLAGS(flags, MODE_E2);
12308         else if (CHIP_IS_E3(bp)) {
12309                 SET_FLAGS(flags, MODE_E3);
12310                 if (CHIP_REV(bp) == CHIP_REV_Ax)
12311                         SET_FLAGS(flags, MODE_E3_A0);
12312                 else /*if (CHIP_REV(bp) == CHIP_REV_Bx)*/
12313                         SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
12314         }
12315
12316         if (IS_MF(bp)) {
12317                 SET_FLAGS(flags, MODE_MF);
12318                 switch (bp->mf_mode) {
12319                 case MULTI_FUNCTION_SD:
12320                         SET_FLAGS(flags, MODE_MF_SD);
12321                         break;
12322                 case MULTI_FUNCTION_SI:
12323                         SET_FLAGS(flags, MODE_MF_SI);
12324                         break;
12325                 case MULTI_FUNCTION_AFEX:
12326                         SET_FLAGS(flags, MODE_MF_AFEX);
12327                         break;
12328                 }
12329         } else
12330                 SET_FLAGS(flags, MODE_SF);
12331
12332 #if defined(__LITTLE_ENDIAN)
12333         SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
12334 #else /*(__BIG_ENDIAN)*/
12335         SET_FLAGS(flags, MODE_BIG_ENDIAN);
12336 #endif
12337         INIT_MODE_FLAGS(bp) = flags;
12338 }
12339
12340 static int bnx2x_init_bp(struct bnx2x *bp)
12341 {
12342         int func;
12343         int rc;
12344
12345         mutex_init(&bp->port.phy_mutex);
12346         mutex_init(&bp->fw_mb_mutex);
12347         mutex_init(&bp->drv_info_mutex);
12348         sema_init(&bp->stats_lock, 1);
12349         bp->drv_info_mng_owner = false;
12350         INIT_LIST_HEAD(&bp->vlan_reg);
12351
12352         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
12353         INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
12354         INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
12355         INIT_DELAYED_WORK(&bp->iov_task, bnx2x_iov_task);
12356         if (IS_PF(bp)) {
12357                 rc = bnx2x_get_hwinfo(bp);
12358                 if (rc)
12359                         return rc;
12360         } else {
12361                 eth_zero_addr(bp->dev->dev_addr);
12362         }
12363
12364         bnx2x_set_modes_bitmap(bp);
12365
12366         rc = bnx2x_alloc_mem_bp(bp);
12367         if (rc)
12368                 return rc;
12369
12370         bnx2x_read_fwinfo(bp);
12371
12372         func = BP_FUNC(bp);
12373
12374         /* need to reset chip if undi was active */
12375         if (IS_PF(bp) && !BP_NOMCP(bp)) {
12376                 /* init fw_seq */
12377                 bp->fw_seq =
12378                         SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
12379                                                         DRV_MSG_SEQ_NUMBER_MASK;
12380                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
12381
12382                 rc = bnx2x_prev_unload(bp);
12383                 if (rc) {
12384                         bnx2x_free_mem_bp(bp);
12385                         return rc;
12386                 }
12387         }
12388
12389         if (CHIP_REV_IS_FPGA(bp))
12390                 dev_err(&bp->pdev->dev, "FPGA detected\n");
12391
12392         if (BP_NOMCP(bp) && (func == 0))
12393                 dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n");
12394
12395         bp->disable_tpa = disable_tpa;
12396         bp->disable_tpa |= !!IS_MF_STORAGE_ONLY(bp);
12397         /* Reduce memory usage in kdump environment by disabling TPA */
12398         bp->disable_tpa |= is_kdump_kernel();
12399
12400         /* Set TPA flags */
12401         if (bp->disable_tpa) {
12402                 bp->dev->hw_features &= ~NETIF_F_LRO;
12403                 bp->dev->features &= ~NETIF_F_LRO;
12404         }
12405
12406         if (CHIP_IS_E1(bp))
12407                 bp->dropless_fc = 0;
12408         else
12409                 bp->dropless_fc = dropless_fc | bnx2x_get_dropless_info(bp);
12410
12411         bp->mrrs = mrrs;
12412
12413         bp->tx_ring_size = IS_MF_STORAGE_ONLY(bp) ? 0 : MAX_TX_AVAIL;
12414         if (IS_VF(bp))
12415                 bp->rx_ring_size = MAX_RX_AVAIL;
12416
12417         /* make sure that the numbers are in the right granularity */
12418         bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
12419         bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
12420
12421         bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ;
12422
12423         init_timer(&bp->timer);
12424         bp->timer.expires = jiffies + bp->current_interval;
12425         bp->timer.data = (unsigned long) bp;
12426         bp->timer.function = bnx2x_timer;
12427
12428         if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) &&
12429             SHMEM2_HAS(bp, dcbx_lldp_dcbx_stat_offset) &&
12430             SHMEM2_HAS(bp, dcbx_en) &&
12431             SHMEM2_RD(bp, dcbx_lldp_params_offset) &&
12432             SHMEM2_RD(bp, dcbx_lldp_dcbx_stat_offset) &&
12433             SHMEM2_RD(bp, dcbx_en[BP_PORT(bp)])) {
12434                 bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
12435                 bnx2x_dcbx_init_params(bp);
12436         } else {
12437                 bnx2x_dcbx_set_state(bp, false, BNX2X_DCBX_ENABLED_OFF);
12438         }
12439
12440         if (CHIP_IS_E1x(bp))
12441                 bp->cnic_base_cl_id = FP_SB_MAX_E1x;
12442         else
12443                 bp->cnic_base_cl_id = FP_SB_MAX_E2;
12444
12445         /* multiple tx priority */
12446         if (IS_VF(bp))
12447                 bp->max_cos = 1;
12448         else if (CHIP_IS_E1x(bp))
12449                 bp->max_cos = BNX2X_MULTI_TX_COS_E1X;
12450         else if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
12451                 bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0;
12452         else if (CHIP_IS_E3B0(bp))
12453                 bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
12454         else
12455                 BNX2X_ERR("unknown chip %x revision %x\n",
12456                           CHIP_NUM(bp), CHIP_REV(bp));
12457         BNX2X_DEV_INFO("set bp->max_cos to %d\n", bp->max_cos);
12458
12459         /* We need at least one default status block for slow-path events,
12460          * second status block for the L2 queue, and a third status block for
12461          * CNIC if supported.
12462          */
12463         if (IS_VF(bp))
12464                 bp->min_msix_vec_cnt = 1;
12465         else if (CNIC_SUPPORT(bp))
12466                 bp->min_msix_vec_cnt = 3;
12467         else /* PF w/o cnic */
12468                 bp->min_msix_vec_cnt = 2;
12469         BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d\n", bp->min_msix_vec_cnt);
12470
12471         bp->dump_preset_idx = 1;
12472
12473         if (CHIP_IS_E3B0(bp))
12474                 bp->flags |= PTP_SUPPORTED;
12475
12476         return rc;
12477 }
12478
12479 /****************************************************************************
12480 * General service functions
12481 ****************************************************************************/
12482
12483 /*
12484  * net_device service functions
12485  */
12486
12487 /* called with rtnl_lock */
12488 static int bnx2x_open(struct net_device *dev)
12489 {
12490         struct bnx2x *bp = netdev_priv(dev);
12491         int rc;
12492
12493         bp->stats_init = true;
12494
12495         netif_carrier_off(dev);
12496
12497         bnx2x_set_power_state(bp, PCI_D0);
12498
12499         /* If a parity error occurred during the unload, then attentions
12500          * and/or RECOVERY_IN_PROGRESS may still be set. In this case we
12501          * want the first function loaded on the current engine to
12502          * complete the recovery.
12503          * Parity recovery is only relevant for PF driver.
12504          */
12505         if (IS_PF(bp)) {
12506                 int other_engine = BP_PATH(bp) ? 0 : 1;
12507                 bool other_load_status, load_status;
12508                 bool global = false;
12509
12510                 other_load_status = bnx2x_get_load_status(bp, other_engine);
12511                 load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
12512                 if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
12513                     bnx2x_chk_parity_attn(bp, &global, true)) {
12514                         do {
12515                                 /* If there are attentions and they are in a
12516                                  * global blocks, set the GLOBAL_RESET bit
12517                                  * regardless whether it will be this function
12518                                  * that will complete the recovery or not.
12519                                  */
12520                                 if (global)
12521                                         bnx2x_set_reset_global(bp);
12522
12523                                 /* Only the first function on the current
12524                                  * engine should try to recover in open. In case
12525                                  * of attentions in global blocks only the first
12526                                  * in the chip should try to recover.
12527                                  */
12528                                 if ((!load_status &&
12529                                      (!global || !other_load_status)) &&
12530                                       bnx2x_trylock_leader_lock(bp) &&
12531                                       !bnx2x_leader_reset(bp)) {
12532                                         netdev_info(bp->dev,
12533                                                     "Recovered in open\n");
12534                                         break;
12535                                 }
12536
12537                                 /* recovery has failed... */
12538                                 bnx2x_set_power_state(bp, PCI_D3hot);
12539                                 bp->recovery_state = BNX2X_RECOVERY_FAILED;
12540
12541                                 BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n"
12542                                           "If you still see this message after a few retries, a power cycle is required.\n");
12543
12544                                 return -EAGAIN;
12545                         } while (0);
12546                 }
12547         }
12548
12549         bp->recovery_state = BNX2X_RECOVERY_DONE;
12550         rc = bnx2x_nic_load(bp, LOAD_OPEN);
12551         if (rc)
12552                 return rc;
12553
12554 #ifdef CONFIG_BNX2X_VXLAN
12555         if (IS_PF(bp))
12556                 vxlan_get_rx_port(dev);
12557 #endif
12558 #if IS_ENABLED(CONFIG_BNX2X_GENEVE)
12559         if (IS_PF(bp))
12560                 geneve_get_rx_port(dev);
12561 #endif
12562
12563         return 0;
12564 }
12565
12566 /* called with rtnl_lock */
12567 static int bnx2x_close(struct net_device *dev)
12568 {
12569         struct bnx2x *bp = netdev_priv(dev);
12570
12571         /* Unload the driver, release IRQs */
12572         bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
12573
12574         return 0;
12575 }
12576
12577 static int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
12578                                       struct bnx2x_mcast_ramrod_params *p)
12579 {
12580         int mc_count = netdev_mc_count(bp->dev);
12581         struct bnx2x_mcast_list_elem *mc_mac =
12582                 kcalloc(mc_count, sizeof(*mc_mac), GFP_ATOMIC);
12583         struct netdev_hw_addr *ha;
12584
12585         if (!mc_mac)
12586                 return -ENOMEM;
12587
12588         INIT_LIST_HEAD(&p->mcast_list);
12589
12590         netdev_for_each_mc_addr(ha, bp->dev) {
12591                 mc_mac->mac = bnx2x_mc_addr(ha);
12592                 list_add_tail(&mc_mac->link, &p->mcast_list);
12593                 mc_mac++;
12594         }
12595
12596         p->mcast_list_len = mc_count;
12597
12598         return 0;
12599 }
12600
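/* Note: bnx2x_init_mcast_macs_list() above allocates all of the
 * bnx2x_mcast_list_elem entries as a single kcalloc() array and links them
 * onto p->mcast_list in order, so the first element of the list is also the
 * base of the allocation.  That is why the free routine below only needs to
 * kfree() the first entry to release the whole list.
 */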
12601 static void bnx2x_free_mcast_macs_list(
12602         struct bnx2x_mcast_ramrod_params *p)
12603 {
12604         struct bnx2x_mcast_list_elem *mc_mac =
12605                 list_first_entry(&p->mcast_list, struct bnx2x_mcast_list_elem,
12606                                  link);
12607
12608         WARN_ON(!mc_mac);
12609         kfree(mc_mac);
12610 }
12611
12612 /**
12613  * bnx2x_set_uc_list - configure a new unicast MACs list.
12614  *
12615  * @bp: driver handle
12616  *
12617  * We will use zero (0) as a MAC type for these MACs.
12618  */
12619 static int bnx2x_set_uc_list(struct bnx2x *bp)
12620 {
12621         int rc;
12622         struct net_device *dev = bp->dev;
12623         struct netdev_hw_addr *ha;
12624         struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
12625         unsigned long ramrod_flags = 0;
12626
12627         /* First, schedule a clean-up of the old configuration */
12628         rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false);
12629         if (rc < 0) {
12630                 BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc);
12631                 return rc;
12632         }
12633
12634         netdev_for_each_uc_addr(ha, dev) {
12635                 rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true,
12636                                        BNX2X_UC_LIST_MAC, &ramrod_flags);
12637                 if (rc == -EEXIST) {
12638                         DP(BNX2X_MSG_SP,
12639                            "Failed to schedule ADD operations: %d\n", rc);
12640                         /* do not treat adding same MAC as error */
12641                         rc = 0;
12642
12643                 } else if (rc < 0) {
12644
12645                         BNX2X_ERR("Failed to schedule ADD operations: %d\n",
12646                                   rc);
12647                         return rc;
12648                 }
12649         }
12650
12651         /* Execute the pending commands */
12652         __set_bit(RAMROD_CONT, &ramrod_flags);
12653         return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */,
12654                                  BNX2X_UC_LIST_MAC, &ramrod_flags);
12655 }
12656
12657 static int bnx2x_set_mc_list(struct bnx2x *bp)
12658 {
12659         struct net_device *dev = bp->dev;
12660         struct bnx2x_mcast_ramrod_params rparam = {NULL};
12661         int rc = 0;
12662
12663         rparam.mcast_obj = &bp->mcast_obj;
12664
12665         /* first, clear all configured multicast MACs */
12666         rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
12667         if (rc < 0) {
12668                 BNX2X_ERR("Failed to clear multicast configuration: %d\n", rc);
12669                 return rc;
12670         }
12671
12672         /* then, configure a new MACs list */
12673         if (netdev_mc_count(dev)) {
12674                 rc = bnx2x_init_mcast_macs_list(bp, &rparam);
12675                 if (rc) {
12676                         BNX2X_ERR("Failed to create multicast MACs list: %d\n",
12677                                   rc);
12678                         return rc;
12679                 }
12680
12681                 /* Now add the new MACs */
12682                 rc = bnx2x_config_mcast(bp, &rparam,
12683                                         BNX2X_MCAST_CMD_ADD);
12684                 if (rc < 0)
12685                         BNX2X_ERR("Failed to set a new multicast configuration: %d\n",
12686                                   rc);
12687
12688                 bnx2x_free_mcast_macs_list(&rparam);
12689         }
12690
12691         return rc;
12692 }
12693
12694 /* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
12695 static void bnx2x_set_rx_mode(struct net_device *dev)
12696 {
12697         struct bnx2x *bp = netdev_priv(dev);
12698
12699         if (bp->state != BNX2X_STATE_OPEN) {
12700                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
12701                 return;
12702         } else {
12703                 /* Schedule an SP task to handle rest of change */
12704                 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_RX_MODE,
12705                                        NETIF_MSG_IFUP);
12706         }
12707 }
12708
12709 void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
12710 {
12711         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
12712
12713         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);
12714
12715         netif_addr_lock_bh(bp->dev);
12716
12717         if (bp->dev->flags & IFF_PROMISC) {
12718                 rx_mode = BNX2X_RX_MODE_PROMISC;
12719         } else if ((bp->dev->flags & IFF_ALLMULTI) ||
12720                    ((netdev_mc_count(bp->dev) > BNX2X_MAX_MULTICAST) &&
12721                     CHIP_IS_E1(bp))) {
12722                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
12723         } else {
12724                 if (IS_PF(bp)) {
12725                         /* some multicasts */
12726                         if (bnx2x_set_mc_list(bp) < 0)
12727                                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
12728
12729                         /* release bh lock, as bnx2x_set_uc_list might sleep */
12730                         netif_addr_unlock_bh(bp->dev);
12731                         if (bnx2x_set_uc_list(bp) < 0)
12732                                 rx_mode = BNX2X_RX_MODE_PROMISC;
12733                         netif_addr_lock_bh(bp->dev);
12734                 } else {
12735                         /* configuring mcast to a vf involves sleeping (when we
12736                          * wait for the pf's response).
12737                          */
12738                         bnx2x_schedule_sp_rtnl(bp,
12739                                                BNX2X_SP_RTNL_VFPF_MCAST, 0);
12740                 }
12741         }
12742
12743         bp->rx_mode = rx_mode;
12744         /* handle ISCSI SD mode */
12745         if (IS_MF_ISCSI_ONLY(bp))
12746                 bp->rx_mode = BNX2X_RX_MODE_NONE;
12747
12748         /* Schedule the rx_mode command */
12749         if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
12750                 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
12751                 netif_addr_unlock_bh(bp->dev);
12752                 return;
12753         }
12754
12755         if (IS_PF(bp)) {
12756                 bnx2x_set_storm_rx_mode(bp);
12757                 netif_addr_unlock_bh(bp->dev);
12758         } else {
12759                 /* VF will need to request the PF to make this change, and so
12760                  * the VF needs to release the bottom-half lock prior to the
12761                  * request (as it will likely require sleep on the VF side)
12762                  */
12763                 netif_addr_unlock_bh(bp->dev);
12764                 bnx2x_vfpf_storm_rx_mode(bp);
12765         }
12766 }
12767
12768 /* called with rtnl_lock */
12769 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
12770                            int devad, u16 addr)
12771 {
12772         struct bnx2x *bp = netdev_priv(netdev);
12773         u16 value;
12774         int rc;
12775
12776         DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
12777            prtad, devad, addr);
12778
12779         /* The HW expects different devad if CL22 is used */
12780         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12781
12782         bnx2x_acquire_phy_lock(bp);
12783         rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
12784         bnx2x_release_phy_lock(bp);
12785         DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
12786
12787         if (!rc)
12788                 rc = value;
12789         return rc;
12790 }
12791
12792 /* called with rtnl_lock */
12793 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
12794                             u16 addr, u16 value)
12795 {
12796         struct bnx2x *bp = netdev_priv(netdev);
12797         int rc;
12798
12799         DP(NETIF_MSG_LINK,
12800            "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x, value 0x%x\n",
12801            prtad, devad, addr, value);
12802
12803         /* The HW expects different devad if CL22 is used */
12804         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12805
12806         bnx2x_acquire_phy_lock(bp);
12807         rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
12808         bnx2x_release_phy_lock(bp);
12809         return rc;
12810 }
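
/* Editorial note: these two callbacks are wired into bp->mdio
 * (bp->mdio.mdio_read / bp->mdio.mdio_write) in bnx2x_init_dev() below, and
 * are reached through mdio_mii_ioctl() from bnx2x_ioctl(), e.g. for
 * SIOCGMIIREG/SIOCSMIIREG requests issued by userspace tools such as mii-tool.
 */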
12811
12812 /* called with rtnl_lock */
12813 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12814 {
12815         struct bnx2x *bp = netdev_priv(dev);
12816         struct mii_ioctl_data *mdio = if_mii(ifr);
12817
12818         if (!netif_running(dev))
12819                 return -EAGAIN;
12820
12821         switch (cmd) {
12822         case SIOCSHWTSTAMP:
12823                 return bnx2x_hwtstamp_ioctl(bp, ifr);
12824         default:
12825                 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
12826                    mdio->phy_id, mdio->reg_num, mdio->val_in);
12827                 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
12828         }
12829 }
12830
12831 #ifdef CONFIG_NET_POLL_CONTROLLER
12832 static void poll_bnx2x(struct net_device *dev)
12833 {
12834         struct bnx2x *bp = netdev_priv(dev);
12835         int i;
12836
12837         for_each_eth_queue(bp, i) {
12838                 struct bnx2x_fastpath *fp = &bp->fp[i];
12839                 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
12840         }
12841 }
12842 #endif
12843
12844 static int bnx2x_validate_addr(struct net_device *dev)
12845 {
12846         struct bnx2x *bp = netdev_priv(dev);
12847
12848         /* query the bulletin board for mac address configured by the PF */
12849         if (IS_VF(bp))
12850                 bnx2x_sample_bulletin(bp);
12851
12852         if (!is_valid_ether_addr(dev->dev_addr)) {
12853                 BNX2X_ERR("Non-valid Ethernet address\n");
12854                 return -EADDRNOTAVAIL;
12855         }
12856         return 0;
12857 }
12858
12859 static int bnx2x_get_phys_port_id(struct net_device *netdev,
12860                                   struct netdev_phys_item_id *ppid)
12861 {
12862         struct bnx2x *bp = netdev_priv(netdev);
12863
12864         if (!(bp->flags & HAS_PHYS_PORT_ID))
12865                 return -EOPNOTSUPP;
12866
12867         ppid->id_len = sizeof(bp->phys_port_id);
12868         memcpy(ppid->id, bp->phys_port_id, ppid->id_len);
12869
12870         return 0;
12871 }
12872
12873 static netdev_features_t bnx2x_features_check(struct sk_buff *skb,
12874                                               struct net_device *dev,
12875                                               netdev_features_t features)
12876 {
12877         features = vlan_features_check(skb, features);
12878         return vxlan_features_check(skb, features);
12879 }
12880
12881 static int __bnx2x_vlan_configure_vid(struct bnx2x *bp, u16 vid, bool add)
12882 {
12883         int rc;
12884
12885         if (IS_PF(bp)) {
12886                 unsigned long ramrod_flags = 0;
12887
12888                 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
12889                 rc = bnx2x_set_vlan_one(bp, vid, &bp->sp_objs->vlan_obj,
12890                                         add, &ramrod_flags);
12891         } else {
12892                 rc = bnx2x_vfpf_update_vlan(bp, vid, bp->fp->index, add);
12893         }
12894
12895         return rc;
12896 }
12897
12898 static int bnx2x_vlan_configure_vid_list(struct bnx2x *bp)
12899 {
12900         struct bnx2x_vlan_entry *vlan;
12901         int rc = 0;
12902
12903         /* Configure all non-configured entries */
12904         list_for_each_entry(vlan, &bp->vlan_reg, link) {
12905                 if (vlan->hw)
12906                         continue;
12907
12908                 if (bp->vlan_cnt >= bp->vlan_credit)
12909                         return -ENOBUFS;
12910
12911                 rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
12912                 if (rc) {
12913                         BNX2X_ERR("Unable to config VLAN %d\n", vlan->vid);
12914                         return rc;
12915                 }
12916
12917                 DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n", vlan->vid);
12918                 vlan->hw = true;
12919                 bp->vlan_cnt++;
12920         }
12921
12922         return 0;
12923 }
12924
12925 static void bnx2x_vlan_configure(struct bnx2x *bp, bool set_rx_mode)
12926 {
12927         bool need_accept_any_vlan;
12928
12929         need_accept_any_vlan = !!bnx2x_vlan_configure_vid_list(bp);
12930
12931         if (bp->accept_any_vlan != need_accept_any_vlan) {
12932                 bp->accept_any_vlan = need_accept_any_vlan;
12933                 DP(NETIF_MSG_IFUP, "Accept all VLAN %s\n",
12934                    bp->accept_any_vlan ? "raised" : "cleared");
12935                 if (set_rx_mode) {
12936                         if (IS_PF(bp))
12937                                 bnx2x_set_rx_mode_inner(bp);
12938                         else
12939                                 bnx2x_vfpf_storm_rx_mode(bp);
12940                 }
12941         }
12942 }
12943
12944 int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
12945 {
12946         struct bnx2x_vlan_entry *vlan;
12947
12948         /* The hw forgot all entries after reload */
12949         list_for_each_entry(vlan, &bp->vlan_reg, link)
12950                 vlan->hw = false;
12951         bp->vlan_cnt = 0;
12952
12953         /* Don't set rx mode here. Our caller will do it. */
12954         bnx2x_vlan_configure(bp, false);
12955
12956         return 0;
12957 }
12958
12959 static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
12960 {
12961         struct bnx2x *bp = netdev_priv(dev);
12962         struct bnx2x_vlan_entry *vlan;
12963
12964         DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid);
12965
12966         vlan = kmalloc(sizeof(*vlan), GFP_KERNEL);
12967         if (!vlan)
12968                 return -ENOMEM;
12969
12970         vlan->vid = vid;
12971         vlan->hw = false;
12972         list_add_tail(&vlan->link, &bp->vlan_reg);
12973
12974         if (netif_running(dev))
12975                 bnx2x_vlan_configure(bp, true);
12976
12977         return 0;
12978 }
12979
12980 static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
12981 {
12982         struct bnx2x *bp = netdev_priv(dev);
12983         struct bnx2x_vlan_entry *vlan;
12984         bool found = false;
12985         int rc = 0;
12986
12987         DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid);
12988
12989         list_for_each_entry(vlan, &bp->vlan_reg, link)
12990                 if (vlan->vid == vid) {
12991                         found = true;
12992                         break;
12993                 }
12994
12995         if (!found) {
12996                 BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid);
12997                 return -EINVAL;
12998         }
12999
13000         if (netif_running(dev) && vlan->hw) {
13001                 rc = __bnx2x_vlan_configure_vid(bp, vid, false);
13002                 DP(NETIF_MSG_IFUP, "HW deconfigured for VLAN %d\n", vid);
13003                 bp->vlan_cnt--;
13004         }
13005
13006         list_del(&vlan->link);
13007         kfree(vlan);
13008
13009         if (netif_running(dev))
13010                 bnx2x_vlan_configure(bp, true);
13011
13012         DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc);
13013
13014         return rc;
13015 }
13016
13017 static const struct net_device_ops bnx2x_netdev_ops = {
13018         .ndo_open               = bnx2x_open,
13019         .ndo_stop               = bnx2x_close,
13020         .ndo_start_xmit         = bnx2x_start_xmit,
13021         .ndo_select_queue       = bnx2x_select_queue,
13022         .ndo_set_rx_mode        = bnx2x_set_rx_mode,
13023         .ndo_set_mac_address    = bnx2x_change_mac_addr,
13024         .ndo_validate_addr      = bnx2x_validate_addr,
13025         .ndo_do_ioctl           = bnx2x_ioctl,
13026         .ndo_change_mtu         = bnx2x_change_mtu,
13027         .ndo_fix_features       = bnx2x_fix_features,
13028         .ndo_set_features       = bnx2x_set_features,
13029         .ndo_tx_timeout         = bnx2x_tx_timeout,
13030         .ndo_vlan_rx_add_vid    = bnx2x_vlan_rx_add_vid,
13031         .ndo_vlan_rx_kill_vid   = bnx2x_vlan_rx_kill_vid,
13032 #ifdef CONFIG_NET_POLL_CONTROLLER
13033         .ndo_poll_controller    = poll_bnx2x,
13034 #endif
13035         .ndo_setup_tc           = __bnx2x_setup_tc,
13036 #ifdef CONFIG_BNX2X_SRIOV
13037         .ndo_set_vf_mac         = bnx2x_set_vf_mac,
13038         .ndo_set_vf_vlan        = bnx2x_set_vf_vlan,
13039         .ndo_get_vf_config      = bnx2x_get_vf_config,
13040 #endif
13041 #ifdef NETDEV_FCOE_WWNN
13042         .ndo_fcoe_get_wwn       = bnx2x_fcoe_get_wwn,
13043 #endif
13044
13045         .ndo_get_phys_port_id   = bnx2x_get_phys_port_id,
13046         .ndo_set_vf_link_state  = bnx2x_set_vf_link_state,
13047         .ndo_features_check     = bnx2x_features_check,
13048 #ifdef CONFIG_BNX2X_VXLAN
13049         .ndo_add_vxlan_port     = bnx2x_add_vxlan_port,
13050         .ndo_del_vxlan_port     = bnx2x_del_vxlan_port,
13051 #endif
13052 #if IS_ENABLED(CONFIG_BNX2X_GENEVE)
13053         .ndo_add_geneve_port    = bnx2x_add_geneve_port,
13054         .ndo_del_geneve_port    = bnx2x_del_geneve_port,
13055 #endif
13056 };
13057
13058 static int bnx2x_set_coherency_mask(struct bnx2x *bp)
13059 {
13060         struct device *dev = &bp->pdev->dev;
13061
13062         if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) != 0 &&
13063             dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)) != 0) {
13064                 dev_err(dev, "System does not support DMA, aborting\n");
13065                 return -EIO;
13066         }
13067
13068         return 0;
13069 }
13070
13071 static void bnx2x_disable_pcie_error_reporting(struct bnx2x *bp)
13072 {
13073         if (bp->flags & AER_ENABLED) {
13074                 pci_disable_pcie_error_reporting(bp->pdev);
13075                 bp->flags &= ~AER_ENABLED;
13076         }
13077 }
13078
13079 static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
13080                           struct net_device *dev, unsigned long board_type)
13081 {
13082         int rc;
13083         u32 pci_cfg_dword;
13084         bool chip_is_e1x = (board_type == BCM57710 ||
13085                             board_type == BCM57711 ||
13086                             board_type == BCM57711E);
13087
13088         SET_NETDEV_DEV(dev, &pdev->dev);
13089
13090         bp->dev = dev;
13091         bp->pdev = pdev;
13092
13093         rc = pci_enable_device(pdev);
13094         if (rc) {
13095                 dev_err(&bp->pdev->dev,
13096                         "Cannot enable PCI device, aborting\n");
13097                 goto err_out;
13098         }
13099
13100         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
13101                 dev_err(&bp->pdev->dev,
13102                         "Cannot find PCI device base address, aborting\n");
13103                 rc = -ENODEV;
13104                 goto err_out_disable;
13105         }
13106
13107         if (IS_PF(bp) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
13108                 dev_err(&bp->pdev->dev, "Cannot find second PCI device base address, aborting\n");
13109                 rc = -ENODEV;
13110                 goto err_out_disable;
13111         }
13112
13113         pci_read_config_dword(pdev, PCICFG_REVISION_ID_OFFSET, &pci_cfg_dword);
13114         if ((pci_cfg_dword & PCICFG_REVESION_ID_MASK) ==
13115             PCICFG_REVESION_ID_ERROR_VAL) {
13116                 pr_err("PCI device error, probably due to fan failure, aborting\n");
13117                 rc = -ENODEV;
13118                 goto err_out_disable;
13119         }
13120
13121         if (atomic_read(&pdev->enable_cnt) == 1) {
13122                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
13123                 if (rc) {
13124                         dev_err(&bp->pdev->dev,
13125                                 "Cannot obtain PCI resources, aborting\n");
13126                         goto err_out_disable;
13127                 }
13128
13129                 pci_set_master(pdev);
13130                 pci_save_state(pdev);
13131         }
13132
13133         if (IS_PF(bp)) {
13134                 if (!pdev->pm_cap) {
13135                         dev_err(&bp->pdev->dev,
13136                                 "Cannot find power management capability, aborting\n");
13137                         rc = -EIO;
13138                         goto err_out_release;
13139                 }
13140         }
13141
13142         if (!pci_is_pcie(pdev)) {
13143                 dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n");
13144                 rc = -EIO;
13145                 goto err_out_release;
13146         }
13147
13148         rc = bnx2x_set_coherency_mask(bp);
13149         if (rc)
13150                 goto err_out_release;
13151
13152         dev->mem_start = pci_resource_start(pdev, 0);
13153         dev->base_addr = dev->mem_start;
13154         dev->mem_end = pci_resource_end(pdev, 0);
13155
13156         dev->irq = pdev->irq;
13157
13158         bp->regview = pci_ioremap_bar(pdev, 0);
13159         if (!bp->regview) {
13160                 dev_err(&bp->pdev->dev,
13161                         "Cannot map register space, aborting\n");
13162                 rc = -ENOMEM;
13163                 goto err_out_release;
13164         }
13165
13166         /* In E1/E1H use pci device function given by kernel.
13167          * In E2/E3 read physical function from ME register since these chips
13168          * support Physical Device Assignment, where the kernel BDF may be
13169          * arbitrary (depending on the hypervisor).
13170          */
13171         if (chip_is_e1x) {
13172                 bp->pf_num = PCI_FUNC(pdev->devfn);
13173         } else {
13174                 /* chip is E2/3*/
13175                 pci_read_config_dword(bp->pdev,
13176                                       PCICFG_ME_REGISTER, &pci_cfg_dword);
13177                 bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >>
13178                                   ME_REG_ABS_PF_NUM_SHIFT);
13179         }
13180         BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num);
13181
13182         /* clean indirect addresses */
13183         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
13184                                PCICFG_VENDOR_ID_OFFSET);
13185
13186         /* Set PCIe reset type to fundamental for EEH recovery */
13187         pdev->needs_freset = 1;
13188
13189         /* AER (Advanced Error reporting) configuration */
13190         rc = pci_enable_pcie_error_reporting(pdev);
13191         if (!rc)
13192                 bp->flags |= AER_ENABLED;
13193         else
13194                 BNX2X_DEV_INFO("Failed to configure PCIe AER [%d]\n", rc);
13195
13196         /*
13197          * Clean the following indirect addresses for all functions since they
13198          * are not used by the driver.
13199          */
13200         if (IS_PF(bp)) {
13201                 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
13202                 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
13203                 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
13204                 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
13205
13206                 if (chip_is_e1x) {
13207                         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
13208                         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
13209                         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
13210                         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
13211                 }
13212
13213                 /* Enable internal target-read (in case we are probed after PF
13214                  * FLR). Must be done prior to any BAR read access. Only for
13215                  * 57712 and up
13216                  */
13217                 if (!chip_is_e1x)
13218                         REG_WR(bp,
13219                                PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
13220         }
13221
13222         dev->watchdog_timeo = TX_TIMEOUT;
13223
13224         dev->netdev_ops = &bnx2x_netdev_ops;
13225         bnx2x_set_ethtool_ops(bp, dev);
13226
13227         dev->priv_flags |= IFF_UNICAST_FLT;
13228
13229         dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
13230                 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
13231                 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
13232                 NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
13233         if (!chip_is_e1x) {
13234                 dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
13235                                     NETIF_F_GSO_IPXIP4;
13236                 dev->hw_enc_features =
13237                         NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13238                         NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
13239                         NETIF_F_GSO_IPXIP4 |
13240                         NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
13241         }
13242
13243         dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
13244                 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
13245
13246         /* VFs with an old hypervisor or old PF do not support VLAN filtering */
13247         if (IS_PF(bp)) {
13248                 if (chip_is_e1x)
13249                         bp->accept_any_vlan = true;
13250                 else
13251                         dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
13252 #ifdef CONFIG_BNX2X_SRIOV
13253         } else if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
13254                 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
13255 #endif
13256         }
13257
13258         dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
13259         dev->features |= NETIF_F_HIGHDMA;
13260
13261         /* Add Loopback capability to the device */
13262         dev->hw_features |= NETIF_F_LOOPBACK;
13263
13264 #ifdef BCM_DCBNL
13265         dev->dcbnl_ops = &bnx2x_dcbnl_ops;
13266 #endif
13267
13268         /* get_port_hwinfo() will set prtad and mmds properly */
13269         bp->mdio.prtad = MDIO_PRTAD_NONE;
13270         bp->mdio.mmds = 0;
13271         bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
13272         bp->mdio.dev = dev;
13273         bp->mdio.mdio_read = bnx2x_mdio_read;
13274         bp->mdio.mdio_write = bnx2x_mdio_write;
13275
13276         return 0;
13277
13278 err_out_release:
13279         if (atomic_read(&pdev->enable_cnt) == 1)
13280                 pci_release_regions(pdev);
13281
13282 err_out_disable:
13283         pci_disable_device(pdev);
13284
13285 err_out:
13286         return rc;
13287 }
13288
13289 static int bnx2x_check_firmware(struct bnx2x *bp)
13290 {
13291         const struct firmware *firmware = bp->firmware;
13292         struct bnx2x_fw_file_hdr *fw_hdr;
13293         struct bnx2x_fw_file_section *sections;
13294         u32 offset, len, num_ops;
13295         __be16 *ops_offsets;
13296         int i;
13297         const u8 *fw_ver;
13298
13299         if (firmware->size < sizeof(struct bnx2x_fw_file_hdr)) {
13300                 BNX2X_ERR("Wrong FW size\n");
13301                 return -EINVAL;
13302         }
13303
13304         fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
13305         sections = (struct bnx2x_fw_file_section *)fw_hdr;
13306
13307         /* Make sure none of the offsets and sizes make us read beyond
13308          * the end of the firmware data */
13309         for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
13310                 offset = be32_to_cpu(sections[i].offset);
13311                 len = be32_to_cpu(sections[i].len);
13312                 if (offset + len > firmware->size) {
13313                         BNX2X_ERR("Section %d length is out of bounds\n", i);
13314                         return -EINVAL;
13315                 }
13316         }
13317
13318         /* Likewise for the init_ops offsets */
13319         offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
13320         ops_offsets = (__force __be16 *)(firmware->data + offset);
13321         num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
13322
13323         for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
13324                 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
13325                         BNX2X_ERR("Section offset %d is out of bounds\n", i);
13326                         return -EINVAL;
13327                 }
13328         }
13329
13330         /* Check FW version */
13331         offset = be32_to_cpu(fw_hdr->fw_version.offset);
13332         fw_ver = firmware->data + offset;
13333         if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
13334             (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
13335             (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
13336             (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
13337                 BNX2X_ERR("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
13338                        fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
13339                        BCM_5710_FW_MAJOR_VERSION,
13340                        BCM_5710_FW_MINOR_VERSION,
13341                        BCM_5710_FW_REVISION_VERSION,
13342                        BCM_5710_FW_ENGINEERING_VERSION);
13343                 return -EINVAL;
13344         }
13345
13346         return 0;
13347 }
13348
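      /* Byte-swap an n-byte firmware section from big-endian 32-bit words
       * to CPU order (used below for the init_data blob).
       */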
13349 static void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13350 {
13351         const __be32 *source = (const __be32 *)_source;
13352         u32 *target = (u32 *)_target;
13353         u32 i;
13354
13355         for (i = 0; i < n/4; i++)
13356                 target[i] = be32_to_cpu(source[i]);
13357 }
13358
13359 /*
13360  * Ops array is stored in the following format:
13361  * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
13362  */
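      /* For example, a big-endian entry of 0x45010203 0xAABBCCDD decodes to
       * op = 0x45, offset = 0x010203 and raw_data = 0xAABBCCDD.
       */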
13363 static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
13364 {
13365         const __be32 *source = (const __be32 *)_source;
13366         struct raw_op *target = (struct raw_op *)_target;
13367         u32 i, j, tmp;
13368
13369         for (i = 0, j = 0; i < n/8; i++, j += 2) {
13370                 tmp = be32_to_cpu(source[j]);
13371                 target[i].op = (tmp >> 24) & 0xff;
13372                 target[i].offset = tmp & 0xffffff;
13373                 target[i].raw_data = be32_to_cpu(source[j + 1]);
13374         }
13375 }
13376
13377 /* IRO array is stored in the following format:
13378  * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
13379  */
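      /* Each entry consumes three big-endian dwords: the first holds base,
       * the second packs m1 (high 16 bits) and m2 (low 16 bits), and the
       * third packs m3 (high 16 bits) and size (low 16 bits).
       */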
13380 static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
13381 {
13382         const __be32 *source = (const __be32 *)_source;
13383         struct iro *target = (struct iro *)_target;
13384         u32 i, j, tmp;
13385
13386         for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
13387                 target[i].base = be32_to_cpu(source[j]);
13388                 j++;
13389                 tmp = be32_to_cpu(source[j]);
13390                 target[i].m1 = (tmp >> 16) & 0xffff;
13391                 target[i].m2 = tmp & 0xffff;
13392                 j++;
13393                 tmp = be32_to_cpu(source[j]);
13394                 target[i].m3 = (tmp >> 16) & 0xffff;
13395                 target[i].size = tmp & 0xffff;
13396                 j++;
13397         }
13398 }
13399
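      /* Byte-swap an n-byte firmware section from big-endian 16-bit values
       * to CPU order (used below for the init_ops_offsets array).
       */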
13400 static void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13401 {
13402         const __be16 *source = (const __be16 *)_source;
13403         u16 *target = (u16 *)_target;
13404         u32 i;
13405
13406         for (i = 0; i < n/2; i++)
13407                 target[i] = be16_to_cpu(source[i]);
13408 }
13409
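      /* Allocate bp->arr sized by the matching firmware header section, jump
       * to the error label 'lbl' if the allocation fails, and convert the
       * section from the firmware image into the new buffer using 'func'.
       */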
13410 #define BNX2X_ALLOC_AND_SET(arr, lbl, func)                             \
13411 do {                                                                    \
13412         u32 len = be32_to_cpu(fw_hdr->arr.len);                         \
13413         bp->arr = kmalloc(len, GFP_KERNEL);                             \
13414         if (!bp->arr)                                                   \
13415                 goto lbl;                                               \
13416         func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),      \
13417              (u8 *)bp->arr, len);                                       \
13418 } while (0)
13419
13420 static int bnx2x_init_firmware(struct bnx2x *bp)
13421 {
13422         const char *fw_file_name;
13423         struct bnx2x_fw_file_hdr *fw_hdr;
13424         int rc;
13425
13426         if (bp->firmware)
13427                 return 0;
13428
13429         if (CHIP_IS_E1(bp))
13430                 fw_file_name = FW_FILE_NAME_E1;
13431         else if (CHIP_IS_E1H(bp))
13432                 fw_file_name = FW_FILE_NAME_E1H;
13433         else if (!CHIP_IS_E1x(bp))
13434                 fw_file_name = FW_FILE_NAME_E2;
13435         else {
13436                 BNX2X_ERR("Unsupported chip revision\n");
13437                 return -EINVAL;
13438         }
13439         BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
13440
13441         rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
13442         if (rc) {
13443                 BNX2X_ERR("Can't load firmware file %s\n",
13444                           fw_file_name);
13445                 goto request_firmware_exit;
13446         }
13447
13448         rc = bnx2x_check_firmware(bp);
13449         if (rc) {
13450                 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
13451                 goto request_firmware_exit;
13452         }
13453
13454         fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
13455
13456         /* Initialize the pointers to the init arrays */
13457         /* Blob */
13458         BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
13459
13460         /* Opcodes */
13461         BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
13462
13463         /* Offsets */
13464         BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
13465                             be16_to_cpu_n);
13466
13467         /* STORMs firmware */
13468         INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13469                         be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
13470         INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
13471                         be32_to_cpu(fw_hdr->tsem_pram_data.offset);
13472         INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13473                         be32_to_cpu(fw_hdr->usem_int_table_data.offset);
13474         INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
13475                         be32_to_cpu(fw_hdr->usem_pram_data.offset);
13476         INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13477                         be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
13478         INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
13479                         be32_to_cpu(fw_hdr->xsem_pram_data.offset);
13480         INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13481                         be32_to_cpu(fw_hdr->csem_int_table_data.offset);
13482         INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
13483                         be32_to_cpu(fw_hdr->csem_pram_data.offset);
13484         /* IRO */
13485         BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
13486
13487         return 0;
13488
13489 iro_alloc_err:
13490         kfree(bp->init_ops_offsets);
13491 init_offsets_alloc_err:
13492         kfree(bp->init_ops);
13493 init_ops_alloc_err:
13494         kfree(bp->init_data);
13495 request_firmware_exit:
13496         release_firmware(bp->firmware);
13497         bp->firmware = NULL;
13498
13499         return rc;
13500 }
13501
13502 static void bnx2x_release_firmware(struct bnx2x *bp)
13503 {
13504         kfree(bp->init_ops_offsets);
13505         kfree(bp->init_ops);
13506         kfree(bp->init_data);
13507         release_firmware(bp->firmware);
13508         bp->firmware = NULL;
13509 }
13510
13511 static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = {
13512         .init_hw_cmn_chip = bnx2x_init_hw_common_chip,
13513         .init_hw_cmn      = bnx2x_init_hw_common,
13514         .init_hw_port     = bnx2x_init_hw_port,
13515         .init_hw_func     = bnx2x_init_hw_func,
13516
13517         .reset_hw_cmn     = bnx2x_reset_common,
13518         .reset_hw_port    = bnx2x_reset_port,
13519         .reset_hw_func    = bnx2x_reset_func,
13520
13521         .gunzip_init      = bnx2x_gunzip_init,
13522         .gunzip_end       = bnx2x_gunzip_end,
13523
13524         .init_fw          = bnx2x_init_firmware,
13525         .release_fw       = bnx2x_release_firmware,
13526 };
13527
13528 void bnx2x__init_func_obj(struct bnx2x *bp)
13529 {
13530         /* Prepare DMAE related driver resources */
13531         bnx2x_setup_dmae(bp);
13532
13533         bnx2x_init_func_obj(bp, &bp->func_obj,
13534                             bnx2x_sp(bp, func_rdata),
13535                             bnx2x_sp_mapping(bp, func_rdata),
13536                             bnx2x_sp(bp, func_afex_rdata),
13537                             bnx2x_sp_mapping(bp, func_afex_rdata),
13538                             &bnx2x_func_sp_drv);
13539 }
13540
13541 /* must be called after sriov-enable */
13542 static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
13543 {
13544         int cid_count = BNX2X_L2_MAX_CID(bp);
13545
13546         if (IS_SRIOV(bp))
13547                 cid_count += BNX2X_VF_CIDS;
13548
13549         if (CNIC_SUPPORT(bp))
13550                 cid_count += CNIC_CID_MAX;
13551
13552         return roundup(cid_count, QM_CID_ROUND);
13553 }
13554
13555 /**
13556  * bnx2x_get_num_non_def_sbs - return the number of non-default SBs
13557  *
13558  * @pdev:       pci device
13559  * @cnic_cnt:   number of SBs reserved for CNIC
13560  */
13561 static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt)
13562 {
13563         int index;
13564         u16 control = 0;
13565
13566         /*
13567          * If MSI-X is not supported, return the number of SBs needed to
13568          * support one fast path queue: one FP queue + SB for CNIC.
13569          */
13570         if (!pdev->msix_cap) {
13571                 dev_info(&pdev->dev, "no msix capability found\n");
13572                 return 1 + cnic_cnt;
13573         }
13574         dev_info(&pdev->dev, "msix capability found\n");
13575
13576         /*
13577          * The value in the PCI configuration space is the index of the last
13578          * entry, namely one less than the actual size of the table, which is
13579          * exactly what we want to return from this function: number of all SBs
13580          * without the default SB.
13581          * For VFs there is no default SB; the caller adds one SB for that.
13582          */
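              /* E.g. a QSIZE field of 16 encodes a 17-entry MSI-X table; on a
               * PF that is 16 non-default SBs plus the default SB.
               */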
13583         pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &control);
13584
13585         index = control & PCI_MSIX_FLAGS_QSIZE;
13586
13587         return index;
13588 }
13589
13590 static int set_max_cos_est(int chip_id)
13591 {
13592         switch (chip_id) {
13593         case BCM57710:
13594         case BCM57711:
13595         case BCM57711E:
13596                 return BNX2X_MULTI_TX_COS_E1X;
13597         case BCM57712:
13598         case BCM57712_MF:
13599                 return BNX2X_MULTI_TX_COS_E2_E3A0;
13600         case BCM57800:
13601         case BCM57800_MF:
13602         case BCM57810:
13603         case BCM57810_MF:
13604         case BCM57840_4_10:
13605         case BCM57840_2_20:
13606         case BCM57840_O:
13607         case BCM57840_MFO:
13608         case BCM57840_MF:
13609         case BCM57811:
13610         case BCM57811_MF:
13611                 return BNX2X_MULTI_TX_COS_E3B0;
13612         case BCM57712_VF:
13613         case BCM57800_VF:
13614         case BCM57810_VF:
13615         case BCM57840_VF:
13616         case BCM57811_VF:
13617                 return 1;
13618         default:
13619                 pr_err("Unknown board_type (%d), aborting\n", chip_id);
13620                 return -ENODEV;
13621         }
13622 }
13623
13624 static int set_is_vf(int chip_id)
13625 {
13626         switch (chip_id) {
13627         case BCM57712_VF:
13628         case BCM57800_VF:
13629         case BCM57810_VF:
13630         case BCM57840_VF:
13631         case BCM57811_VF:
13632                 return true;
13633         default:
13634                 return false;
13635         }
13636 }
13637
13638 /* nig_tsgen registers relative address */
13639 #define tsgen_ctrl 0x0
13640 #define tsgen_freecount 0x10
13641 #define tsgen_synctime_t0 0x20
13642 #define tsgen_offset_t0 0x28
13643 #define tsgen_drift_t0 0x30
13644 #define tsgen_synctime_t1 0x58
13645 #define tsgen_offset_t1 0x60
13646 #define tsgen_drift_t1 0x68
13647
13648 /* FW workaround for setting drift */
13649 static int bnx2x_send_update_drift_ramrod(struct bnx2x *bp, int drift_dir,
13650                                           int best_val, int best_period)
13651 {
13652         struct bnx2x_func_state_params func_params = {NULL};
13653         struct bnx2x_func_set_timesync_params *set_timesync_params =
13654                 &func_params.params.set_timesync;
13655
13656         /* Prepare parameters for function state transitions */
13657         __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
13658         __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
13659
13660         func_params.f_obj = &bp->func_obj;
13661         func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC;
13662
13663         /* Function parameters */
13664         set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_SET;
13665         set_timesync_params->offset_cmd = TS_OFFSET_KEEP;
13666         set_timesync_params->add_sub_drift_adjust_value =
13667                 drift_dir ? TS_ADD_VALUE : TS_SUB_VALUE;
13668         set_timesync_params->drift_adjust_value = best_val;
13669         set_timesync_params->drift_adjust_period = best_period;
13670
13671         return bnx2x_func_state_change(bp, &func_params);
13672 }
13673
13674 static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
13675 {
13676         struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13677         int rc;
13678         int drift_dir = 1;
13679         int val, period, period1, period2, dif, dif1, dif2;
13680         int best_dif = BNX2X_MAX_PHC_DRIFT, best_period = 0, best_val = 0;
13681
13682         DP(BNX2X_MSG_PTP, "PTP adjfreq called, ppb = %d\n", ppb);
13683
13684         if (!netif_running(bp->dev)) {
13685                 DP(BNX2X_MSG_PTP,
13686                    "PTP adjfreq called while the interface is down\n");
13687                 return -EFAULT;
13688         }
13689
13690         if (ppb < 0) {
13691                 ppb = -ppb;
13692                 drift_dir = 0;
13693         }
13694
13695         if (ppb == 0) {
13696                 best_val = 1;
13697                 best_period = 0x1FFFFFF;
13698         } else if (ppb >= BNX2X_MAX_PHC_DRIFT) {
13699                 best_val = 31;
13700                 best_period = 1;
13701         } else {
13702                 /* val = 8, 16 and 24 are not supported by the workaround,
13703                  * so the loop below skips every multiple of 8.
13704                  */
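                      /* E.g. for ppb = 100000, val = 1 gives period1 =
                       * 1000000 / 100000 = 10 and dif1 = 0, so val = 1,
                       * period = 10 is an exact match.
                       */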
13705                 for (val = 0; val <= 31; val++) {
13706                         if ((val & 0x7) == 0)
13707                                 continue;
13708                         period1 = val * 1000000 / ppb;
13709                         period2 = period1 + 1;
13710                         if (period1 != 0)
13711                                 dif1 = ppb - (val * 1000000 / period1);
13712                         else
13713                                 dif1 = BNX2X_MAX_PHC_DRIFT;
13714                         if (dif1 < 0)
13715                                 dif1 = -dif1;
13716                         dif2 = ppb - (val * 1000000 / period2);
13717                         if (dif2 < 0)
13718                                 dif2 = -dif2;
13719                         dif = (dif1 < dif2) ? dif1 : dif2;
13720                         period = (dif1 < dif2) ? period1 : period2;
13721                         if (dif < best_dif) {
13722                                 best_dif = dif;
13723                                 best_val = val;
13724                                 best_period = period;
13725                         }
13726                 }
13727         }
13728
13729         rc = bnx2x_send_update_drift_ramrod(bp, drift_dir, best_val,
13730                                             best_period);
13731         if (rc) {
13732                 BNX2X_ERR("Failed to set drift\n");
13733                 return -EFAULT;
13734         }
13735
13736         DP(BNX2X_MSG_PTP, "Configured val = %d, period = %d\n", best_val,
13737            best_period);
13738
13739         return 0;
13740 }
13741
13742 static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
13743 {
13744         struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13745
13746         DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta);
13747
13748         timecounter_adjtime(&bp->timecounter, delta);
13749
13750         return 0;
13751 }
13752
13753 static int bnx2x_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
13754 {
13755         struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13756         u64 ns;
13757
13758         ns = timecounter_read(&bp->timecounter);
13759
13760         DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns);
13761
13762         *ts = ns_to_timespec64(ns);
13763
13764         return 0;
13765 }
13766
13767 static int bnx2x_ptp_settime(struct ptp_clock_info *ptp,
13768                              const struct timespec64 *ts)
13769 {
13770         struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13771         u64 ns;
13772
13773         ns = timespec64_to_ns(ts);
13774
13775         DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns);
13776
13777         /* Re-init the timecounter */
13778         timecounter_init(&bp->timecounter, &bp->cyclecounter, ns);
13779
13780         return 0;
13781 }
13782
13783 /* Enable (or disable) ancillary features of the phc subsystem */
13784 static int bnx2x_ptp_enable(struct ptp_clock_info *ptp,
13785                             struct ptp_clock_request *rq, int on)
13786 {
13787         struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13788
13789         BNX2X_ERR("PHC ancillary features are not supported\n");
13790         return -ENOTSUPP;
13791 }
13792
13793 static void bnx2x_register_phc(struct bnx2x *bp)
13794 {
13795         /* Fill the ptp_clock_info struct and register PTP clock */
13796         bp->ptp_clock_info.owner = THIS_MODULE;
13797         snprintf(bp->ptp_clock_info.name, 16, "%s", bp->dev->name);
13798         bp->ptp_clock_info.max_adj = BNX2X_MAX_PHC_DRIFT; /* In PPB */
13799         bp->ptp_clock_info.n_alarm = 0;
13800         bp->ptp_clock_info.n_ext_ts = 0;
13801         bp->ptp_clock_info.n_per_out = 0;
13802         bp->ptp_clock_info.pps = 0;
13803         bp->ptp_clock_info.adjfreq = bnx2x_ptp_adjfreq;
13804         bp->ptp_clock_info.adjtime = bnx2x_ptp_adjtime;
13805         bp->ptp_clock_info.gettime64 = bnx2x_ptp_gettime;
13806         bp->ptp_clock_info.settime64 = bnx2x_ptp_settime;
13807         bp->ptp_clock_info.enable = bnx2x_ptp_enable;
13808
13809         bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev);
13810         if (IS_ERR(bp->ptp_clock)) {
13811                 bp->ptp_clock = NULL;
13812                 BNX2X_ERR("PTP clock registration failed\n");
13813         }
13814 }
13815
13816 static int bnx2x_init_one(struct pci_dev *pdev,
13817                                     const struct pci_device_id *ent)
13818 {
13819         struct net_device *dev = NULL;
13820         struct bnx2x *bp;
13821         enum pcie_link_width pcie_width;
13822         enum pci_bus_speed pcie_speed;
13823         int rc, max_non_def_sbs;
13824         int rx_count, tx_count, rss_count, doorbell_size;
13825         int max_cos_est;
13826         bool is_vf;
13827         int cnic_cnt;
13828
13829         /* Management FW 'remembers' living interfaces. Give it some time
13830          * to forget previously living interfaces so a proper re-load works.
13831          */
13832         if (is_kdump_kernel()) {
13833                 ktime_t now = ktime_get_boottime();
13834                 ktime_t fw_ready_time = ktime_set(5, 0);
13835
13836                 if (ktime_before(now, fw_ready_time))
13837                         msleep(ktime_ms_delta(fw_ready_time, now));
13838         }
13839
13840         /* An estimated maximum supported CoS number according to the chip
13841          * version.
13842          * We will try to roughly estimate the maximum number of CoSes this chip
13843          * may support in order to minimize the memory allocated for Tx
13844          * netdev_queues. This number will be accurately calculated during the
13845          * initialization of bp->max_cos, based on the chip version AND chip
13846          * revision, in bnx2x_init_bp().
13847          */
13848         max_cos_est = set_max_cos_est(ent->driver_data);
13849         if (max_cos_est < 0)
13850                 return max_cos_est;
13851         is_vf = set_is_vf(ent->driver_data);
13852         cnic_cnt = is_vf ? 0 : 1;
13853
13854         max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt);
13855
13856         /* add another SB for VF as it has no default SB */
13857         max_non_def_sbs += is_vf ? 1 : 0;
13858
13859         /* Maximum number of RSS queues: one IGU SB goes to CNIC */
13860         rss_count = max_non_def_sbs - cnic_cnt;
13861
13862         if (rss_count < 1)
13863                 return -EINVAL;
13864
13865         /* Maximum number of netdev Rx queues: RSS + FCoE L2 */
13866         rx_count = rss_count + cnic_cnt;
13867
13868         /* Maximum number of netdev Tx queues:
13869          * Maximum TSS queues * Maximum supported number of CoS  + FCoE L2
13870          */
13871         tx_count = rss_count * max_cos_est + cnic_cnt;
13872
13873         /* dev zeroed in init_etherdev */
13874         dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
13875         if (!dev)
13876                 return -ENOMEM;
13877
13878         bp = netdev_priv(dev);
13879
13880         bp->flags = 0;
13881         if (is_vf)
13882                 bp->flags |= IS_VF_FLAG;
13883
13884         bp->igu_sb_cnt = max_non_def_sbs;
13885         bp->igu_base_addr = IS_VF(bp) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM;
13886         bp->msg_enable = debug;
13887         bp->cnic_support = cnic_cnt;
13888         bp->cnic_probe = bnx2x_cnic_probe;
13889
13890         pci_set_drvdata(pdev, dev);
13891
13892         rc = bnx2x_init_dev(bp, pdev, dev, ent->driver_data);
13893         if (rc < 0) {
13894                 free_netdev(dev);
13895                 return rc;
13896         }
13897
13898         BNX2X_DEV_INFO("This is a %s function\n",
13899                        IS_PF(bp) ? "physical" : "virtual");
13900         BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? "on" : "off");
13901         BNX2X_DEV_INFO("Max num of status blocks %d\n", max_non_def_sbs);
13902         BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
13903                        tx_count, rx_count);
13904
13905         rc = bnx2x_init_bp(bp);
13906         if (rc)
13907                 goto init_one_exit;
13908
13909         /* Map doorbells here as we need the real value of bp->max_cos which
13910          * is initialized in bnx2x_init_bp() to determine the number of
13911          * l2 connections.
13912          */
13913         if (IS_VF(bp)) {
13914                 bp->doorbells = bnx2x_vf_doorbells(bp);
13915                 rc = bnx2x_vf_pci_alloc(bp);
13916                 if (rc)
13917                         goto init_one_freemem;
13918         } else {
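                      /* Each L2 connection owns a doorbell window of
                       * (1 << BNX2X_DB_SHIFT) bytes; the total must fit
                       * within BAR 2 or we bail out below.
                       */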
13919                 doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
13920                 if (doorbell_size > pci_resource_len(pdev, 2)) {
13921                         dev_err(&bp->pdev->dev,
13922                                 "Cannot map doorbells, bar size too small, aborting\n");
13923                         rc = -ENOMEM;
13924                         goto init_one_freemem;
13925                 }
13926                 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
13927                                                 doorbell_size);
13928         }
13929         if (!bp->doorbells) {
13930                 dev_err(&bp->pdev->dev,
13931                         "Cannot map doorbell space, aborting\n");
13932                 rc = -ENOMEM;
13933                 goto init_one_freemem;
13934         }
13935
13936         if (IS_VF(bp)) {
13937                 rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
13938                 if (rc)
13939                         goto init_one_freemem;
13940         }
13941
13942         /* Enable SRIOV if capability found in configuration space */
13943         rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
13944         if (rc)
13945                 goto init_one_freemem;
13946
13947         /* calc qm_cid_count */
13948         bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
13949         BNX2X_DEV_INFO("qm_cid_count %d\n", bp->qm_cid_count);
13950
13951         /* disable FCOE L2 queue for E1x */
13952         if (CHIP_IS_E1x(bp))
13953                 bp->flags |= NO_FCOE_FLAG;
13954
13955         /* Set bp->num_queues for MSI-X mode */
13956         bnx2x_set_num_queues(bp);
13957
13958         /* Configure interrupt mode: try to enable MSI-X/MSI if
13959          * needed.
13960          */
13961         rc = bnx2x_set_int_mode(bp);
13962         if (rc) {
13963                 dev_err(&pdev->dev, "Cannot set interrupts\n");
13964                 goto init_one_freemem;
13965         }
13966         BNX2X_DEV_INFO("set interrupts successfully\n");
13967
13968         /* register the net device */
13969         rc = register_netdev(dev);
13970         if (rc) {
13971                 dev_err(&pdev->dev, "Cannot register net device\n");
13972                 goto init_one_freemem;
13973         }
13974         BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name);
13975
13976         if (!NO_FCOE(bp)) {
13977                 /* Add storage MAC address */
13978                 rtnl_lock();
13979                 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
13980                 rtnl_unlock();
13981         }
13982         if (pcie_get_minimum_link(bp->pdev, &pcie_speed, &pcie_width) ||
13983             pcie_speed == PCI_SPEED_UNKNOWN ||
13984             pcie_width == PCIE_LNK_WIDTH_UNKNOWN)
13985                 BNX2X_DEV_INFO("Failed to determine PCI Express Bandwidth\n");
13986         else
13987                 BNX2X_DEV_INFO(
13988                        "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
13989                        board_info[ent->driver_data].name,
13990                        (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
13991                        pcie_width,
13992                        pcie_speed == PCIE_SPEED_2_5GT ? "2.5GHz" :
13993                        pcie_speed == PCIE_SPEED_5_0GT ? "5.0GHz" :
13994                        pcie_speed == PCIE_SPEED_8_0GT ? "8.0GHz" :
13995                        "Unknown",
13996                        dev->base_addr, bp->pdev->irq, dev->dev_addr);
13997
13998         bnx2x_register_phc(bp);
13999
14000         if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
14001                 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
14002
14003         return 0;
14004
14005 init_one_freemem:
14006         bnx2x_free_mem_bp(bp);
14007
14008 init_one_exit:
14009         bnx2x_disable_pcie_error_reporting(bp);
14010
14011         if (bp->regview)
14012                 iounmap(bp->regview);
14013
14014         if (IS_PF(bp) && bp->doorbells)
14015                 iounmap(bp->doorbells);
14016
14017         free_netdev(dev);
14018
14019         if (atomic_read(&pdev->enable_cnt) == 1)
14020                 pci_release_regions(pdev);
14021
14022         pci_disable_device(pdev);
14023
14024         return rc;
14025 }
14026
14027 static void __bnx2x_remove(struct pci_dev *pdev,
14028                            struct net_device *dev,
14029                            struct bnx2x *bp,
14030                            bool remove_netdev)
14031 {
14032         if (bp->ptp_clock) {
14033                 ptp_clock_unregister(bp->ptp_clock);
14034                 bp->ptp_clock = NULL;
14035         }
14036
14037         /* Delete storage MAC address */
14038         if (!NO_FCOE(bp)) {
14039                 rtnl_lock();
14040                 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
14041                 rtnl_unlock();
14042         }
14043
14044 #ifdef BCM_DCBNL
14045         /* Delete app tlvs from dcbnl */
14046         bnx2x_dcbnl_update_applist(bp, true);
14047 #endif
14048
14049         if (IS_PF(bp) &&
14050             !BP_NOMCP(bp) &&
14051             (bp->flags & BC_SUPPORTS_RMMOD_CMD))
14052                 bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0);
14053
14054         /* Close the interface - either directly or implicitly */
14055         if (remove_netdev) {
14056                 unregister_netdev(dev);
14057         } else {
14058                 rtnl_lock();
14059                 dev_close(dev);
14060                 rtnl_unlock();
14061         }
14062
14063         bnx2x_iov_remove_one(bp);
14064
14065         /* Power on: we can't let PCI layer write to us while we are in D3 */
14066         if (IS_PF(bp)) {
14067                 bnx2x_set_power_state(bp, PCI_D0);
14068                 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_NOT_LOADED);
14069
14070                 /* Set endianness registers to reset values in case the next
14071                  * driver boots in a different endianness environment.
14072                  */
14073                 bnx2x_reset_endianity(bp);
14074         }
14075
14076         /* Disable MSI/MSI-X */
14077         bnx2x_disable_msi(bp);
14078
14079         /* Power off */
14080         if (IS_PF(bp))
14081                 bnx2x_set_power_state(bp, PCI_D3hot);
14082
14083         /* Make sure RESET task is not scheduled before continuing */
14084         cancel_delayed_work_sync(&bp->sp_rtnl_task);
14085
14086         /* send message via vfpf channel to release the resources of this vf */
14087         if (IS_VF(bp))
14088                 bnx2x_vfpf_release(bp);
14089
14090         /* Assumes no further PCIe PM changes will occur */
14091         if (system_state == SYSTEM_POWER_OFF) {
14092                 pci_wake_from_d3(pdev, bp->wol);
14093                 pci_set_power_state(pdev, PCI_D3hot);
14094         }
14095
14096         bnx2x_disable_pcie_error_reporting(bp);
14097         if (remove_netdev) {
14098                 if (bp->regview)
14099                         iounmap(bp->regview);
14100
14101                 /* For vfs, doorbells are part of the regview and were unmapped
14102                  * along with it. FW is only loaded by PF.
14103                  */
14104                 if (IS_PF(bp)) {
14105                         if (bp->doorbells)
14106                                 iounmap(bp->doorbells);
14107
14108                         bnx2x_release_firmware(bp);
14109                 } else {
14110                         bnx2x_vf_pci_dealloc(bp);
14111                 }
14112                 bnx2x_free_mem_bp(bp);
14113
14114                 free_netdev(dev);
14115
14116                 if (atomic_read(&pdev->enable_cnt) == 1)
14117                         pci_release_regions(pdev);
14118
14119                 pci_disable_device(pdev);
14120         }
14121 }
14122
14123 static void bnx2x_remove_one(struct pci_dev *pdev)
14124 {
14125         struct net_device *dev = pci_get_drvdata(pdev);
14126         struct bnx2x *bp;
14127
14128         if (!dev) {
14129                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
14130                 return;
14131         }
14132         bp = netdev_priv(dev);
14133
14134         __bnx2x_remove(pdev, dev, bp, true);
14135 }
14136
14137 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
14138 {
14139         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
14140
14141         bp->rx_mode = BNX2X_RX_MODE_NONE;
14142
14143         if (CNIC_LOADED(bp))
14144                 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
14145
14146         /* Stop Tx */
14147         bnx2x_tx_disable(bp);
14148         /* Delete all NAPI objects */
14149         bnx2x_del_all_napi(bp);
14150         if (CNIC_LOADED(bp))
14151                 bnx2x_del_all_napi_cnic(bp);
14152         netdev_reset_tc(bp->dev);
14153
14154         del_timer_sync(&bp->timer);
14155         cancel_delayed_work_sync(&bp->sp_task);
14156         cancel_delayed_work_sync(&bp->period_task);
14157
14158         if (!down_timeout(&bp->stats_lock, HZ / 10)) {
14159                 bp->stats_state = STATS_STATE_DISABLED;
14160                 up(&bp->stats_lock);
14161         }
14162
14163         bnx2x_save_statistics(bp);
14164
14165         netif_carrier_off(bp->dev);
14166
14167         return 0;
14168 }
14169
14170 /**
14171  * bnx2x_io_error_detected - called when PCI error is detected
14172  * @pdev: Pointer to PCI device
14173  * @state: The current pci connection state
14174  *
14175  * This function is called after a PCI bus error affecting
14176  * this device has been detected.
14177  */
14178 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
14179                                                 pci_channel_state_t state)
14180 {
14181         struct net_device *dev = pci_get_drvdata(pdev);
14182         struct bnx2x *bp = netdev_priv(dev);
14183
14184         rtnl_lock();
14185
14186         BNX2X_ERR("IO error detected\n");
14187
14188         netif_device_detach(dev);
14189
14190         if (state == pci_channel_io_perm_failure) {
14191                 rtnl_unlock();
14192                 return PCI_ERS_RESULT_DISCONNECT;
14193         }
14194
14195         if (netif_running(dev))
14196                 bnx2x_eeh_nic_unload(bp);
14197
14198         bnx2x_prev_path_mark_eeh(bp);
14199
14200         pci_disable_device(pdev);
14201
14202         rtnl_unlock();
14203
14204         /* Request a slot reset */
14205         return PCI_ERS_RESULT_NEED_RESET;
14206 }
14207
14208 /**
14209  * bnx2x_io_slot_reset - called after the PCI bus has been reset
14210  * @pdev: Pointer to PCI device
14211  *
14212  * Restart the card from scratch, as if from a cold-boot.
14213  */
14214 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
14215 {
14216         struct net_device *dev = pci_get_drvdata(pdev);
14217         struct bnx2x *bp = netdev_priv(dev);
14218         int i;
14219
14220         rtnl_lock();
14221         BNX2X_ERR("IO slot reset initializing...\n");
14222         if (pci_enable_device(pdev)) {
14223                 dev_err(&pdev->dev,
14224                         "Cannot re-enable PCI device after reset\n");
14225                 rtnl_unlock();
14226                 return PCI_ERS_RESULT_DISCONNECT;
14227         }
14228
14229         pci_set_master(pdev);
14230         pci_restore_state(pdev);
14231         pci_save_state(pdev);
14232
14233         if (netif_running(dev))
14234                 bnx2x_set_power_state(bp, PCI_D0);
14235
14236         if (netif_running(dev)) {
14237                 BNX2X_ERR("IO slot reset --> driver unload\n");
14238
14239                 /* MCP should have been reset; Need to wait for validity */
14240                 bnx2x_init_shmem(bp);
14241
14242                 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
14243                         u32 v;
14244
14245                         v = SHMEM2_RD(bp,
14246                                       drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
14247                         SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
14248                                   v & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
14249                 }
14250                 bnx2x_drain_tx_queues(bp);
14251                 bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
14252                 bnx2x_netif_stop(bp, 1);
14253                 bnx2x_free_irq(bp);
14254
14255                 /* Report UNLOAD_DONE to MCP */
14256                 bnx2x_send_unload_done(bp, true);
14257
14258                 bp->sp_state = 0;
14259                 bp->port.pmf = 0;
14260
14261                 bnx2x_prev_unload(bp);
14262
14263                 /* We should have reset the engine, so it's fair to
14264                  * assume the FW will no longer write to the bnx2x driver.
14265                  */
14266                 bnx2x_squeeze_objects(bp);
14267                 bnx2x_free_skbs(bp);
14268                 for_each_rx_queue(bp, i)
14269                         bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
14270                 bnx2x_free_fp_mem(bp);
14271                 bnx2x_free_mem(bp);
14272
14273                 bp->state = BNX2X_STATE_CLOSED;
14274         }
14275
14276         rtnl_unlock();
14277
14278         /* If AER, perform cleanup of the PCIe registers */
14279         if (bp->flags & AER_ENABLED) {
14280                 if (pci_cleanup_aer_uncorrect_error_status(pdev))
14281                         BNX2X_ERR("pci_cleanup_aer_uncorrect_error_status failed\n");
14282                 else
14283                         DP(NETIF_MSG_HW, "pci_cleanup_aer_uncorrect_error_status succeeded\n");
14284         }
14285
14286         return PCI_ERS_RESULT_RECOVERED;
14287 }
14288
14289 /**
14290  * bnx2x_io_resume - called when traffic can start flowing again
14291  * @pdev: Pointer to PCI device
14292  *
14293  * This callback is called when the error recovery driver tells us that
14294  * it's OK to resume normal operation.
14295  */
14296 static void bnx2x_io_resume(struct pci_dev *pdev)
14297 {
14298         struct net_device *dev = pci_get_drvdata(pdev);
14299         struct bnx2x *bp = netdev_priv(dev);
14300
14301         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
14302                 netdev_err(bp->dev, "Handling parity error recovery. Try again later\n");
14303                 return;
14304         }
14305
14306         rtnl_lock();
14307
14308         bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
14309                                                         DRV_MSG_SEQ_NUMBER_MASK;
14310
14311         if (netif_running(dev))
14312                 bnx2x_nic_load(bp, LOAD_NORMAL);
14313
14314         netif_device_attach(dev);
14315
14316         rtnl_unlock();
14317 }
14318
14319 static const struct pci_error_handlers bnx2x_err_handler = {
14320         .error_detected = bnx2x_io_error_detected,
14321         .slot_reset     = bnx2x_io_slot_reset,
14322         .resume         = bnx2x_io_resume,
14323 };
14324
14325 static void bnx2x_shutdown(struct pci_dev *pdev)
14326 {
14327         struct net_device *dev = pci_get_drvdata(pdev);
14328         struct bnx2x *bp;
14329
14330         if (!dev)
14331                 return;
14332
14333         bp = netdev_priv(dev);
14334         if (!bp)
14335                 return;
14336
14337         rtnl_lock();
14338         netif_device_detach(dev);
14339         rtnl_unlock();
14340
14341         /* Don't remove the netdevice, as there are scenarios which will cause
14342          * the kernel to hang, e.g., when trying to remove bnx2i while the
14343          * rootfs is mounted from SAN.
14344          */
14345         __bnx2x_remove(pdev, dev, bp, false);
14346 }
14347
14348 static struct pci_driver bnx2x_pci_driver = {
14349         .name        = DRV_MODULE_NAME,
14350         .id_table    = bnx2x_pci_tbl,
14351         .probe       = bnx2x_init_one,
14352         .remove      = bnx2x_remove_one,
14353         .suspend     = bnx2x_suspend,
14354         .resume      = bnx2x_resume,
14355         .err_handler = &bnx2x_err_handler,
14356 #ifdef CONFIG_BNX2X_SRIOV
14357         .sriov_configure = bnx2x_sriov_configure,
14358 #endif
14359         .shutdown    = bnx2x_shutdown,
14360 };
14361
14362 static int __init bnx2x_init(void)
14363 {
14364         int ret;
14365
14366         pr_info("%s", version);
14367
14368         bnx2x_wq = create_singlethread_workqueue("bnx2x");
14369         if (bnx2x_wq == NULL) {
14370                 pr_err("Cannot create workqueue\n");
14371                 return -ENOMEM;
14372         }
14373         bnx2x_iov_wq = create_singlethread_workqueue("bnx2x_iov");
14374         if (!bnx2x_iov_wq) {
14375                 pr_err("Cannot create iov workqueue\n");
14376                 destroy_workqueue(bnx2x_wq);
14377                 return -ENOMEM;
14378         }
14379
14380         ret = pci_register_driver(&bnx2x_pci_driver);
14381         if (ret) {
14382                 pr_err("Cannot register driver\n");
14383                 destroy_workqueue(bnx2x_wq);
14384                 destroy_workqueue(bnx2x_iov_wq);
14385         }
14386         return ret;
14387 }
14388
14389 static void __exit bnx2x_cleanup(void)
14390 {
14391         struct list_head *pos, *q;
14392
14393         pci_unregister_driver(&bnx2x_pci_driver);
14394
14395         destroy_workqueue(bnx2x_wq);
14396         destroy_workqueue(bnx2x_iov_wq);
14397
14398         /* Free globally allocated resources */
14399         list_for_each_safe(pos, q, &bnx2x_prev_list) {
14400                 struct bnx2x_prev_path_list *tmp =
14401                         list_entry(pos, struct bnx2x_prev_path_list, list);
14402                 list_del(pos);
14403                 kfree(tmp);
14404         }
14405 }
14406
14407 void bnx2x_notify_link_changed(struct bnx2x *bp)
14408 {
14409         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1);
14410 }
14411
14412 module_init(bnx2x_init);
14413 module_exit(bnx2x_cleanup);
14414
14415 /**
14416  * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
14417  *
14418  * @bp:         driver handle
14420  *
14421  * This function will wait until the ramrod completion returns.
14422  * Return 0 if success, -ENODEV if ramrod doesn't return.
14423  */
14424 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
14425 {
14426         unsigned long ramrod_flags = 0;
14427
14428         __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
14429         return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac,
14430                                  &bp->iscsi_l2_mac_obj, true,
14431                                  BNX2X_ISCSI_ETH_MAC, &ramrod_flags);
14432 }
14433
14434 /* count denotes the number of new completions we have seen */
14435 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
14436 {
14437         struct eth_spe *spe;
14438         int cxt_index, cxt_offset;
14439
14440 #ifdef BNX2X_STOP_ON_ERROR
14441         if (unlikely(bp->panic))
14442                 return;
14443 #endif
14444
14445         spin_lock_bh(&bp->spq_lock);
14446         BUG_ON(bp->cnic_spq_pending < count);
14447         bp->cnic_spq_pending -= count;
14448
14449         for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
14450                 u16 type =  (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
14451                                 & SPE_HDR_CONN_TYPE) >>
14452                                 SPE_HDR_CONN_TYPE_SHIFT;
14453                 u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data)
14454                                 >> SPE_HDR_CMD_ID_SHIFT) & 0xff;
14455
14456                 /* Set validation for iSCSI L2 client before sending SETUP
14457                  *  ramrod
14458                  */
14459                 if (type == ETH_CONNECTION_TYPE) {
14460                         if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) {
14461                                 cxt_index = BNX2X_ISCSI_ETH_CID(bp) /
14462                                         ILT_PAGE_CIDS;
14463                                 cxt_offset = BNX2X_ISCSI_ETH_CID(bp) -
14464                                         (cxt_index * ILT_PAGE_CIDS);
14465                                 bnx2x_set_ctx_validation(bp,
14466                                         &bp->context[cxt_index].
14467                                                          vcxt[cxt_offset].eth,
14468                                         BNX2X_ISCSI_ETH_CID(bp));
14469                         }
14470                 }
14471
14472                 /*
14473                  * There may be no more than 8 L2 and no more than 8 L5 SPEs
14474                  * in the air. We also check that the number of outstanding
14475                  * COMMON ramrods is not more than the EQ and SPQ can
14476                  * accommodate.
14477                  */
14478                 if (type == ETH_CONNECTION_TYPE) {
14479                         if (!atomic_read(&bp->cq_spq_left))
14480                                 break;
14481                         else
14482                                 atomic_dec(&bp->cq_spq_left);
14483                 } else if (type == NONE_CONNECTION_TYPE) {
14484                         if (!atomic_read(&bp->eq_spq_left))
14485                                 break;
14486                         else
14487                                 atomic_dec(&bp->eq_spq_left);
14488                 } else if ((type == ISCSI_CONNECTION_TYPE) ||
14489                            (type == FCOE_CONNECTION_TYPE)) {
14490                         if (bp->cnic_spq_pending >=
14491                             bp->cnic_eth_dev.max_kwqe_pending)
14492                                 break;
14493                         else
14494                                 bp->cnic_spq_pending++;
14495                 } else {
14496                         BNX2X_ERR("Unknown SPE type: %d\n", type);
14497                         bnx2x_panic();
14498                         break;
14499                 }
14500
14501                 spe = bnx2x_sp_get_next(bp);
14502                 *spe = *bp->cnic_kwq_cons;
14503
14504                 DP(BNX2X_MSG_SP, "pending on SPQ %d, on KWQ %d count %d\n",
14505                    bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
14506
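                      /* Advance the KWQ consumer, wrapping back to the start
                       * of the ring after the last entry.
                       */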
14507                 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
14508                         bp->cnic_kwq_cons = bp->cnic_kwq;
14509                 else
14510                         bp->cnic_kwq_cons++;
14511         }
14512         bnx2x_sp_prod_update(bp);
14513         spin_unlock_bh(&bp->spq_lock);
14514 }
14515
14516 static int bnx2x_cnic_sp_queue(struct net_device *dev,
14517                                struct kwqe_16 *kwqes[], u32 count)
14518 {
14519         struct bnx2x *bp = netdev_priv(dev);
14520         int i;
14521
14522 #ifdef BNX2X_STOP_ON_ERROR
14523         if (unlikely(bp->panic)) {
14524                 BNX2X_ERR("Can't post to SP queue while panic\n");
14525                 return -EIO;
14526         }
14527 #endif
14528
14529         if ((bp->recovery_state != BNX2X_RECOVERY_DONE) &&
14530             (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
14531                 BNX2X_ERR("Handling parity error recovery. Try again later\n");
14532                 return -EAGAIN;
14533         }
14534
14535         spin_lock_bh(&bp->spq_lock);
14536
14537         for (i = 0; i < count; i++) {
14538                 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
14539
14540                 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
14541                         break;
14542
14543                 *bp->cnic_kwq_prod = *spe;
14544
14545                 bp->cnic_kwq_pending++;
14546
14547                 DP(BNX2X_MSG_SP, "L5 SPQE %x %x %x:%x pos %d\n",
14548                    spe->hdr.conn_and_cmd_data, spe->hdr.type,
14549                    spe->data.update_data_addr.hi,
14550                    spe->data.update_data_addr.lo,
14551                    bp->cnic_kwq_pending);
14552
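                      /* Advance the KWQ producer, wrapping back to the start
                       * of the ring after the last entry.
                       */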
14553                 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
14554                         bp->cnic_kwq_prod = bp->cnic_kwq;
14555                 else
14556                         bp->cnic_kwq_prod++;
14557         }
14558
14559         spin_unlock_bh(&bp->spq_lock);
14560
14561         if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
14562                 bnx2x_cnic_sp_post(bp, 0);
14563
14564         return i;
14565 }
14566
14567 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
14568 {
14569         struct cnic_ops *c_ops;
14570         int rc = 0;
14571
14572         mutex_lock(&bp->cnic_mutex);
14573         c_ops = rcu_dereference_protected(bp->cnic_ops,
14574                                           lockdep_is_held(&bp->cnic_mutex));
14575         if (c_ops)
14576                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
14577         mutex_unlock(&bp->cnic_mutex);
14578
14579         return rc;
14580 }
14581
14582 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
14583 {
14584         struct cnic_ops *c_ops;
14585         int rc = 0;
14586
14587         rcu_read_lock();
14588         c_ops = rcu_dereference(bp->cnic_ops);
14589         if (c_ops)
14590                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
14591         rcu_read_unlock();
14592
14593         return rc;
14594 }
14595
14596 /*
14597  * for commands that have no data
14598  */
14599 int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
14600 {
14601         struct cnic_ctl_info ctl = {0};
14602
14603         ctl.cmd = cmd;
14604
14605         return bnx2x_cnic_ctl_send(bp, &ctl);
14606 }
14607
14608 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err)
14609 {
14610         struct cnic_ctl_info ctl = {0};
14611
14612         /* first we tell CNIC and only then we count this as a completion */
14613         ctl.cmd = CNIC_CTL_COMPLETION_CMD;
14614         ctl.data.comp.cid = cid;
14615         ctl.data.comp.error = err;
14616
14617         bnx2x_cnic_ctl_send_bh(bp, &ctl);
14618         bnx2x_cnic_sp_post(bp, 0);
14619 }
14620
14621 /* Called with netif_addr_lock_bh() taken.
14622  * Sets an rx_mode config for an iSCSI ETH client.
14623  * Doesn't block.
14624  * Completion should be checked outside.
14625  */
14626 static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start)
14627 {
14628         unsigned long accept_flags = 0, ramrod_flags = 0;
14629         u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
14630         int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED;
14631
14632         if (start) {
14633                 /* Start accepting on the iSCSI L2 ring. Accept all multicasts
14634                  * because it's the only way for the UIO queue to accept
14635                  * multicasts: in non-promiscuous mode only one queue per
14636                  * function will receive multicast packets (the leading queue
14637                  * in our case).
14638                  */
14639                 __set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags);
14640                 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags);
14641                 __set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags);
14642                 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
14643
14644                 /* Clear STOP_PENDING bit if START is requested */
14645                 clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state);
14646
14647                 sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED;
14648         } else
14649                 /* Clear START_PENDING bit if STOP is requested */
14650                 clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state);
14651
14652         if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
14653                 set_bit(sched_state, &bp->sp_state);
14654         else {
14655                 __set_bit(RAMROD_RX, &ramrod_flags);
14656                 bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0,
14657                                     ramrod_flags);
14658         }
14659 }
14660
14661 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
14662 {
14663         struct bnx2x *bp = netdev_priv(dev);
14664         int rc = 0;
14665
14666         switch (ctl->cmd) {
14667         case DRV_CTL_CTXTBL_WR_CMD: {
14668                 u32 index = ctl->data.io.offset;
14669                 dma_addr_t addr = ctl->data.io.dma_addr;
14670
14671                 bnx2x_ilt_wr(bp, index, addr);
14672                 break;
14673         }
14674
14675         case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
14676                 int count = ctl->data.credit.credit_count;
14677
14678                 bnx2x_cnic_sp_post(bp, count);
14679                 break;
14680         }
14681
14682         /* rtnl_lock is held.  */
14683         case DRV_CTL_START_L2_CMD: {
14684                 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
14685                 unsigned long sp_bits = 0;
14686
14687                 /* Configure the iSCSI classification object */
14688                 bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj,
14689                                    cp->iscsi_l2_client_id,
14690                                    cp->iscsi_l2_cid, BP_FUNC(bp),
14691                                    bnx2x_sp(bp, mac_rdata),
14692                                    bnx2x_sp_mapping(bp, mac_rdata),
14693                                    BNX2X_FILTER_MAC_PENDING,
14694                                    &bp->sp_state, BNX2X_OBJ_TYPE_RX,
14695                                    &bp->macs_pool);
14696
14697                 /* Set iSCSI MAC address */
14698                 rc = bnx2x_set_iscsi_eth_mac_addr(bp);
14699                 if (rc)
14700                         break;
14701
14702                 mmiowb();
14703                 barrier();
14704
14705                 /* Start accepting on iSCSI L2 ring */
14706
14707                 netif_addr_lock_bh(dev);
14708                 bnx2x_set_iscsi_eth_rx_mode(bp, true);
14709                 netif_addr_unlock_bh(dev);
14710
14711                 /* bits to wait on */
14712                 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
14713                 __set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits);
14714
14715                 if (!bnx2x_wait_sp_comp(bp, sp_bits))
14716                         BNX2X_ERR("rx_mode completion timed out!\n");
14717
14718                 break;
14719         }
14720
14721         /* rtnl_lock is held.  */
14722         case DRV_CTL_STOP_L2_CMD: {
14723                 unsigned long sp_bits = 0;
14724
14725                 /* Stop accepting on iSCSI L2 ring */
14726                 netif_addr_lock_bh(dev);
14727                 bnx2x_set_iscsi_eth_rx_mode(bp, false);
14728                 netif_addr_unlock_bh(dev);
14729
14730                 /* bits to wait on */
14731                 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
14732                 __set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits);
14733
14734                 if (!bnx2x_wait_sp_comp(bp, sp_bits))
14735                         BNX2X_ERR("rx_mode completion timed out!\n");
14736
14737                 mmiowb();
14738                 barrier();
14739
14740                 /* Unset iSCSI L2 MAC */
14741                 rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj,
14742                                         BNX2X_ISCSI_ETH_MAC, true);
14743                 break;
14744         }
14745         case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
14746                 int count = ctl->data.credit.credit_count;
14747
14748                 smp_mb__before_atomic();
14749                 atomic_add(count, &bp->cq_spq_left);
14750                 smp_mb__after_atomic();
14751                 break;
14752         }
14753         case DRV_CTL_ULP_REGISTER_CMD: {
14754                 int ulp_type = ctl->data.register_data.ulp_type;
14755
14756                 if (CHIP_IS_E3(bp)) {
14757                         int idx = BP_FW_MB_IDX(bp);
14758                         u32 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
14759                         int path = BP_PATH(bp);
14760                         int port = BP_PORT(bp);
14761                         int i;
14762                         u32 scratch_offset;
14763                         u32 *host_addr;
14764
14765                         /* first write capability to shmem2 */
14766                         if (ulp_type == CNIC_ULP_ISCSI)
14767                                 cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
14768                         else if (ulp_type == CNIC_ULP_FCOE)
14769                                 cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
14770                         SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
14771
14772                         if ((ulp_type != CNIC_ULP_FCOE) ||
14773                             (!SHMEM2_HAS(bp, ncsi_oem_data_addr)) ||
14774                             (!(bp->flags & BC_SUPPORTS_FCOE_FEATURES)))
14775                                 break;
14776
14777                         /* If we reached this point, write the FCoE capabilities */
14778                         scratch_offset = SHMEM2_RD(bp, ncsi_oem_data_addr);
14779                         if (!scratch_offset)
14780                                 break;
14781                         scratch_offset += offsetof(struct glob_ncsi_oem_data,
14782                                                    fcoe_features[path][port]);
14783                         host_addr = (u32 *) &(ctl->data.register_data.
14784                                               fcoe_features);
14785                         for (i = 0; i < sizeof(struct fcoe_capabilities);
14786                              i += 4)
14787                                 REG_WR(bp, scratch_offset + i,
14788                                        *(host_addr + i/4));
14789                 }
14790                 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
14791                 break;
14792         }
14793
14794         case DRV_CTL_ULP_UNREGISTER_CMD: {
14795                 int ulp_type = ctl->data.ulp_type;
14796
14797                 if (CHIP_IS_E3(bp)) {
14798                         int idx = BP_FW_MB_IDX(bp);
14799                         u32 cap;
14800
14801                         cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
14802                         if (ulp_type == CNIC_ULP_ISCSI)
14803                                 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
14804                         else if (ulp_type == CNIC_ULP_FCOE)
14805                                 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
14806                         SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
14807                 }
14808                 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
14809                 break;
14810         }
14811
14812         default:
14813                 BNX2X_ERR("unknown command %x\n", ctl->cmd);
14814                 rc = -EINVAL;
14815         }
14816
14817         /* For storage-only interfaces, change driver state */
14818         if (IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) {
14819                 switch (ctl->drv_state) {
14820                 case DRV_NOP:
14821                         break;
14822                 case DRV_ACTIVE:
14823                         bnx2x_set_os_driver_state(bp,
14824                                                   OS_DRIVER_STATE_ACTIVE);
14825                         break;
14826                 case DRV_INACTIVE:
14827                         bnx2x_set_os_driver_state(bp,
14828                                                   OS_DRIVER_STATE_DISABLED);
14829                         break;
14830                 case DRV_UNLOADED:
14831                         bnx2x_set_os_driver_state(bp,
14832                                                   OS_DRIVER_STATE_NOT_LOADED);
14833                         break;
14834                 default:
14835                         BNX2X_ERR("Unknown cnic driver state: %d\n", ctl->drv_state);
14836                 }
14837         }
14838
14839         return rc;
14840 }
14841
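/* Read the FC-NPIV table from NVRAM on behalf of CNIC/FCoE.
 * The table address is taken from shmem2, the data is read in
 * big-endian form, validated and copied (WWPN/WWNN pairs) into the
 * cnic-provided table.  Returns 0 on success and -EINVAL if the table
 * is absent or malformed.
 */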
14842 static int bnx2x_get_fc_npiv(struct net_device *dev,
14843                              struct cnic_fc_npiv_tbl *cnic_tbl)
14844 {
14845         struct bnx2x *bp = netdev_priv(dev);
14846         struct bdn_fc_npiv_tbl *tbl = NULL;
14847         u32 offset, entries;
14848         int rc = -EINVAL;
14849         int i;
14850
14851         if (!SHMEM2_HAS(bp, fc_npiv_nvram_tbl_addr[0]))
14852                 goto out;
14853
14854         DP(BNX2X_MSG_MCP, "About to read the FC-NPIV table\n");
14855
14856         tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
14857         if (!tbl) {
14858                 BNX2X_ERR("Failed to allocate fc_npiv table\n");
14859                 goto out;
14860         }
14861
14862         offset = SHMEM2_RD(bp, fc_npiv_nvram_tbl_addr[BP_PORT(bp)]);
14863         if (!offset) {
14864                 DP(BNX2X_MSG_MCP, "No FC-NPIV in NVRAM\n");
14865                 goto out;
14866         }
14867         DP(BNX2X_MSG_MCP, "Offset of FC-NPIV in NVRAM: %08x\n", offset);
14868
14869         /* Read the table contents from nvram */
14870         if (bnx2x_nvram_read(bp, offset, (u8 *)tbl, sizeof(*tbl))) {
14871                 BNX2X_ERR("Failed to read FC-NPIV table\n");
14872                 goto out;
14873         }
14874
14875         /* Since bnx2x_nvram_read() returns data in be32, we need to convert
14876          * the number of entries back to cpu endianness.
14877          */
14878         entries = tbl->fc_npiv_cfg.num_of_npiv;
14879         entries = (__force u32)be32_to_cpu((__force __be32)entries);
14880         tbl->fc_npiv_cfg.num_of_npiv = entries;
14881
14882         if (!tbl->fc_npiv_cfg.num_of_npiv) {
14883                 DP(BNX2X_MSG_MCP,
14884                    "No FC-NPIV table [valid, simply not present]\n");
14885                 goto out;
14886         } else if (tbl->fc_npiv_cfg.num_of_npiv > MAX_NUMBER_NPIV) {
14887                 BNX2X_ERR("FC-NPIV table with bad length 0x%08x\n",
14888                           tbl->fc_npiv_cfg.num_of_npiv);
14889                 goto out;
14890         } else {
14891                 DP(BNX2X_MSG_MCP, "Read 0x%08x entries from NVRAM\n",
14892                    tbl->fc_npiv_cfg.num_of_npiv);
14893         }
14894
14895         /* Copy the data into cnic-provided struct */
14896         cnic_tbl->count = tbl->fc_npiv_cfg.num_of_npiv;
14897         for (i = 0; i < cnic_tbl->count; i++) {
14898                 memcpy(cnic_tbl->wwpn[i], tbl->settings[i].npiv_wwpn, 8);
14899                 memcpy(cnic_tbl->wwnn[i], tbl->settings[i].npiv_wwnn, 8);
14900         }
14901
14902         rc = 0;
14903 out:
14904         kfree(tbl);
14905         return rc;
14906 }
14907
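/* Describe the interrupt resources CNIC may use: when MSI-X is enabled
 * CNIC is given the second MSI-X vector (msix_table[1]); in either case
 * it is pointed at the CNIC status block and the default status block.
 */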
14908 void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
14909 {
14910         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
14911
14912         if (bp->flags & USING_MSIX_FLAG) {
14913                 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
14914                 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
14915                 cp->irq_arr[0].vector = bp->msix_table[1].vector;
14916         } else {
14917                 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
14918                 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
14919         }
14920         if (!CHIP_IS_E1x(bp))
14921                 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
14922         else
14923                 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
14924
14925         cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp);
14926         cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp);
14927         cp->irq_arr[1].status_blk = bp->def_status_blk;
14928         cp->irq_arr[1].status_blk_num = DEF_SB_ID;
14929         cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
14930
14931         cp->num_irq = 2;
14932 }
14933
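/* Publish the current CID/ILT layout to CNIC: context-table offset,
 * starting CID and the FCoE/iSCSI L2 CIDs.
 */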
14934 void bnx2x_setup_cnic_info(struct bnx2x *bp)
14935 {
14936         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
14937
14938         cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
14939                              bnx2x_cid_ilt_lines(bp);
14940         cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
14941         cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
14942         cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
14943
14944         DP(NETIF_MSG_IFUP, "BNX2X_1st_NON_L2_ETH_CID(bp) %x, cp->starting_cid %x, cp->fcoe_init_cid %x, cp->iscsi_l2_cid %x\n",
14945            BNX2X_1st_NON_L2_ETH_CID(bp), cp->starting_cid, cp->fcoe_init_cid,
14946            cp->iscsi_l2_cid);
14947
14948         if (NO_ISCSI_OOO(bp))
14949                 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
14950 }
14951
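/* Called by the CNIC module to attach to this device.  Loads the
 * CNIC-related HW resources on first use, allocates the page used to
 * queue pending CNIC slow-path entries, publishes the IRQ information
 * and finally installs the cnic_ops callbacks.
 */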
14952 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
14953                                void *data)
14954 {
14955         struct bnx2x *bp = netdev_priv(dev);
14956         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
14957         int rc;
14958
14959         DP(NETIF_MSG_IFUP, "Register_cnic called\n");
14960
14961         if (ops == NULL) {
14962                 BNX2X_ERR("NULL ops received\n");
14963                 return -EINVAL;
14964         }
14965
14966         if (!CNIC_SUPPORT(bp)) {
14967                 BNX2X_ERR("Can't register CNIC when not supported\n");
14968                 return -EOPNOTSUPP;
14969         }
14970
14971         if (!CNIC_LOADED(bp)) {
14972                 rc = bnx2x_load_cnic(bp);
14973                 if (rc) {
14974                         BNX2X_ERR("CNIC-related load failed\n");
14975                         return rc;
14976                 }
14977         }
14978
14979         bp->cnic_enabled = true;
14980
14981         bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
14982         if (!bp->cnic_kwq)
14983                 return -ENOMEM;
14984
14985         bp->cnic_kwq_cons = bp->cnic_kwq;
14986         bp->cnic_kwq_prod = bp->cnic_kwq;
14987         bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
14988
14989         bp->cnic_spq_pending = 0;
14990         bp->cnic_kwq_pending = 0;
14991
14992         bp->cnic_data = data;
14993
14994         cp->num_irq = 0;
14995         cp->drv_state |= CNIC_DRV_STATE_REGD;
14996         cp->iro_arr = bp->iro_arr;
14997
14998         bnx2x_setup_cnic_irq_info(bp);
14999
15000         rcu_assign_pointer(bp->cnic_ops, ops);
15001
15002         /* Schedule driver to read CNIC driver versions */
15003         bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
15004
15005         return 0;
15006 }
15007
15008 static int bnx2x_unregister_cnic(struct net_device *dev)
15009 {
15010         struct bnx2x *bp = netdev_priv(dev);
15011         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
15012
15013         mutex_lock(&bp->cnic_mutex);
15014         cp->drv_state = 0;
15015         RCU_INIT_POINTER(bp->cnic_ops, NULL);
15016         mutex_unlock(&bp->cnic_mutex);
15017         synchronize_rcu();
15018         bp->cnic_enabled = false;
15019         kfree(bp->cnic_kwq);
15020         bp->cnic_kwq = NULL;
15021
15022         return 0;
15023 }
15024
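/* Entry point used by CNIC to discover this device.  Fills in the
 * cnic_eth_dev structure with chip/BAR information, the CID layout and
 * the driver callbacks CNIC will use, or returns NULL when both iSCSI
 * and FCoE are disabled on this function.
 */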
15025 static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
15026 {
15027         struct bnx2x *bp = netdev_priv(dev);
15028         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
15029
15030         /* If both iSCSI and FCoE are disabled, return NULL to
15031          * indicate to CNIC that it should not try to work with
15032          * this device.
15033          */
15034         if (NO_ISCSI(bp) && NO_FCOE(bp))
15035                 return NULL;
15036
15037         cp->drv_owner = THIS_MODULE;
15038         cp->chip_id = CHIP_ID(bp);
15039         cp->pdev = bp->pdev;
15040         cp->io_base = bp->regview;
15041         cp->io_base2 = bp->doorbells;
15042         cp->max_kwqe_pending = 8;
15043         cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
15044         cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
15045                              bnx2x_cid_ilt_lines(bp);
15046         cp->ctx_tbl_len = CNIC_ILT_LINES;
15047         cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
15048         cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
15049         cp->drv_ctl = bnx2x_drv_ctl;
15050         cp->drv_get_fc_npiv_tbl = bnx2x_get_fc_npiv;
15051         cp->drv_register_cnic = bnx2x_register_cnic;
15052         cp->drv_unregister_cnic = bnx2x_unregister_cnic;
15053         cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
15054         cp->iscsi_l2_client_id =
15055                 bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
15056         cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
15057
15058         if (NO_ISCSI_OOO(bp))
15059                 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
15060
15061         if (NO_ISCSI(bp))
15062                 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
15063
15064         if (NO_FCOE(bp))
15065                 cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
15066
15067         BNX2X_DEV_INFO(
15068                 "page_size %d, tbl_offset %d, tbl_lines %d, starting cid %d\n",
15069                 cp->ctx_blk_size,
15070                 cp->ctx_tbl_offset,
15071                 cp->ctx_tbl_len,
15072                 cp->starting_cid);
15073         return cp;
15074 }
15075
15076 static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
15077 {
15078         struct bnx2x *bp = fp->bp;
15079         u32 offset = BAR_USTRORM_INTMEM;
15080
15081         if (IS_VF(bp))
15082                 return bnx2x_vf_ustorm_prods_offset(bp, fp);
15083         else if (!CHIP_IS_E1x(bp))
15084                 offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
15085         else
15086                 offset += USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
15087
15088         return offset;
15089 }
15090
15091 /* Called only on E1H or E2.
15092  * When pretending to be a PF, the pretend value is the function number 0...7.
15093  * When pretending to be a VF, the pretend value is the
15094  * PF-num:VF-valid:ABS-VFID combination.
15095  */
15096 int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
15097 {
15098         u32 pretend_reg;
15099
15100         if (CHIP_IS_E1H(bp) && pretend_func_val >= E1H_FUNC_MAX)
15101                 return -1;
15102
15103         /* get my own pretend register */
15104         pretend_reg = bnx2x_get_pretend_reg(bp);
15105         REG_WR(bp, pretend_reg, pretend_func_val);
15106         REG_RD(bp, pretend_reg);
15107         return 0;
15108 }
15109
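/* Deferred work for Tx PTP timestamping: poll the NIG Tx timestamp
 * buffer; once a valid value is latched, convert it to nanoseconds via
 * the timecounter, deliver it with skb_tstamp_tx() and release the
 * saved skb.  If no timestamp is ready yet, the work re-schedules
 * itself.
 */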
15110 static void bnx2x_ptp_task(struct work_struct *work)
15111 {
15112         struct bnx2x *bp = container_of(work, struct bnx2x, ptp_task);
15113         int port = BP_PORT(bp);
15114         u32 val_seq;
15115         u64 timestamp, ns;
15116         struct skb_shared_hwtstamps shhwtstamps;
15117
15118         /* Read Tx timestamp registers */
15119         val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
15120                          NIG_REG_P0_TLLH_PTP_BUF_SEQID);
15121         if (val_seq & 0x10000) {
15122                 /* There is a valid timestamp value */
15123                 timestamp = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_MSB :
15124                                    NIG_REG_P0_TLLH_PTP_BUF_TS_MSB);
15125                 timestamp <<= 32;
15126                 timestamp |= REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_LSB :
15127                                     NIG_REG_P0_TLLH_PTP_BUF_TS_LSB);
15128                 /* Reset timestamp register to allow new timestamp */
15129                 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
15130                        NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000);
15131                 ns = timecounter_cyc2time(&bp->timecounter, timestamp);
15132
15133                 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
15134                 shhwtstamps.hwtstamp = ns_to_ktime(ns);
15135                 skb_tstamp_tx(bp->ptp_tx_skb, &shhwtstamps);
15136                 dev_kfree_skb_any(bp->ptp_tx_skb);
15137                 bp->ptp_tx_skb = NULL;
15138
15139                 DP(BNX2X_MSG_PTP, "Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
15140                    timestamp, ns);
15141         } else {
15142                 DP(BNX2X_MSG_PTP, "There is no valid Tx timestamp yet\n");
15143                 /* Reschedule to keep checking for a valid timestamp value */
15144                 schedule_work(&bp->ptp_task);
15145         }
15146 }
15147
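/* Attach an Rx hardware timestamp to an skb: read the latched host
 * buffer timestamp from the NIG, re-arm the register for the next
 * packet and store the converted nanosecond value in the skb's shared
 * hwtstamps.
 */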
15148 void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb)
15149 {
15150         int port = BP_PORT(bp);
15151         u64 timestamp, ns;
15152
15153         timestamp = REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_MSB :
15154                             NIG_REG_P0_LLH_PTP_HOST_BUF_TS_MSB);
15155         timestamp <<= 32;
15156         timestamp |= REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_LSB :
15157                             NIG_REG_P0_LLH_PTP_HOST_BUF_TS_LSB);
15158
15159         /* Reset timestamp register to allow new timestamp */
15160         REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID :
15161                NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000);
15162
15163         ns = timecounter_cyc2time(&bp->timecounter, timestamp);
15164
15165         skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
15166
15167         DP(BNX2X_MSG_PTP, "Rx timestamp, timestamp cycles = %llu, ns = %llu\n",
15168            timestamp, ns);
15169 }
15170
15171 /* Read the PHC */
15172 static cycle_t bnx2x_cyclecounter_read(const struct cyclecounter *cc)
15173 {
15174         struct bnx2x *bp = container_of(cc, struct bnx2x, cyclecounter);
15175         int port = BP_PORT(bp);
15176         u32 wb_data[2];
15177         u64 phc_cycles;
15178
15179         REG_RD_DMAE(bp, port ? NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t1 :
15180                     NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t0, wb_data, 2);
15181         phc_cycles = wb_data[1];
15182         phc_cycles = (phc_cycles << 32) + wb_data[0];
15183
15184         DP(BNX2X_MSG_PTP, "PHC read cycles = %llu\n", phc_cycles);
15185
15186         return phc_cycles;
15187 }
15188
15189 static void bnx2x_init_cyclecounter(struct bnx2x *bp)
15190 {
15191         memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter));
15192         bp->cyclecounter.read = bnx2x_cyclecounter_read;
15193         bp->cyclecounter.mask = CYCLECOUNTER_MASK(64);
15194         bp->cyclecounter.shift = 1;
15195         bp->cyclecounter.mult = 1;
15196 }
15197
15198 static int bnx2x_send_reset_timesync_ramrod(struct bnx2x *bp)
15199 {
15200         struct bnx2x_func_state_params func_params = {NULL};
15201         struct bnx2x_func_set_timesync_params *set_timesync_params =
15202                 &func_params.params.set_timesync;
15203
15204         /* Prepare parameters for function state transitions */
15205         __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
15206         __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
15207
15208         func_params.f_obj = &bp->func_obj;
15209         func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC;
15210
15211         /* Function parameters */
15212         set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_RESET;
15213         set_timesync_params->offset_cmd = TS_OFFSET_KEEP;
15214
15215         return bnx2x_func_state_change(bp, &func_params);
15216 }
15217
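/* Send a queue-update ramrod on every ETH queue of this PF, setting the
 * PTP-packets update flags so the FW will handle PTP traffic for those
 * queues.
 */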
15218 static int bnx2x_enable_ptp_packets(struct bnx2x *bp)
15219 {
15220         struct bnx2x_queue_state_params q_params;
15221         int rc, i;
15222
15223         /* send queue update ramrod to enable PTP packets */
15224         memset(&q_params, 0, sizeof(q_params));
15225         __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
15226         q_params.cmd = BNX2X_Q_CMD_UPDATE;
15227         __set_bit(BNX2X_Q_UPDATE_PTP_PKTS_CHNG,
15228                   &q_params.params.update.update_flags);
15229         __set_bit(BNX2X_Q_UPDATE_PTP_PKTS,
15230                   &q_params.params.update.update_flags);
15231
15232         /* send the ramrod on all the queues of the PF */
15233         for_each_eth_queue(bp, i) {
15234                 struct bnx2x_fastpath *fp = &bp->fp[i];
15235
15236                 /* Set the appropriate Queue object */
15237                 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
15238
15239                 /* Update the Queue state */
15240                 rc = bnx2x_queue_state_change(bp, &q_params);
15241                 if (rc) {
15242                         BNX2X_ERR("Failed to enable PTP packets\n");
15243                         return rc;
15244                 }
15245         }
15246
15247         return 0;
15248 }
15249
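/* Apply the timestamping configuration requested through the hwtstamp
 * ioctl: program the NIG Tx/Rx PTP classification masks according to
 * the chosen tx_type and rx_filter, ask the FW (via queue-update
 * ramrods) to handle PTP packets, and enable forwarding of PTP packets
 * to the host.
 */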
15250 int bnx2x_configure_ptp_filters(struct bnx2x *bp)
15251 {
15252         int port = BP_PORT(bp);
15253         int rc;
15254
15255         if (!bp->hwtstamp_ioctl_called)
15256                 return 0;
15257
15258         switch (bp->tx_type) {
15259         case HWTSTAMP_TX_ON:
15260                 bp->flags |= TX_TIMESTAMPING_EN;
15261                 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
15262                        NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x6AA);
15263                 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
15264                        NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3EEE);
15265                 break;
15266         case HWTSTAMP_TX_ONESTEP_SYNC:
15267                 BNX2X_ERR("One-step timestamping is not supported\n");
15268                 return -ERANGE;
15269         }
15270
15271         switch (bp->rx_filter) {
15272         case HWTSTAMP_FILTER_NONE:
15273                 break;
15274         case HWTSTAMP_FILTER_ALL:
15275         case HWTSTAMP_FILTER_SOME:
15276                 bp->rx_filter = HWTSTAMP_FILTER_NONE;
15277                 break;
15278         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
15279         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
15280         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
15281                 bp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
15282                 /* Initialize PTP detection for UDP/IPv4 events */
15283                 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
15284                        NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EE);
15285                 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
15286                        NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFE);
15287                 break;
15288         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
15289         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
15290         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
15291                 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
15292                 /* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */
15293                 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
15294                        NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EA);
15295                 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
15296                        NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FEE);
15297                 break;
15298         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
15299         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
15300         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
15301                 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
15302                 /* Initialize PTP detection for L2 events */
15303                 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
15304                        NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6BF);
15305                 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
15306                        NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EFF);
15307
15308                 break;
15309         case HWTSTAMP_FILTER_PTP_V2_EVENT:
15310         case HWTSTAMP_FILTER_PTP_V2_SYNC:
15311         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
15312                 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
15313                 /* Initialize PTP detection L2, UDP/IPv4 or UDP/IPv6 events */
15314                 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
15315                        NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6AA);
15316                 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
15317                        NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EEE);
15318                 break;
15319         }
15320
15321         /* Indicate to FW that this PF expects recorded PTP packets */
15322         rc = bnx2x_enable_ptp_packets(bp);
15323         if (rc)
15324                 return rc;
15325
15326         /* Enable sending PTP packets to host */
15327         REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
15328                NIG_REG_P0_LLH_PTP_TO_HOST, 0x1);
15329
15330         return 0;
15331 }
15332
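/* SIOCSHWTSTAMP handler: copy the hwtstamp_config from user space,
 * remember the requested tx_type/rx_filter and re-program the PTP
 * filters accordingly.  Illustrative user-space usage (a sketch only;
 * "eth0" and sock_fd are placeholders, not part of this driver):
 *
 *	struct hwtstamp_config cfg = { 0 };
 *	struct ifreq ifr = { 0 };
 *
 *	cfg.tx_type   = HWTSTAMP_TX_ON;
 *	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	if (ioctl(sock_fd, SIOCSHWTSTAMP, &ifr) < 0)
 *		perror("SIOCSHWTSTAMP");
 *
 * On return, cfg.rx_filter holds the filter actually applied.
 */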
15333 static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr)
15334 {
15335         struct hwtstamp_config config;
15336         int rc;
15337
15338         DP(BNX2X_MSG_PTP, "HWTSTAMP IOCTL called\n");
15339
15340         if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
15341                 return -EFAULT;
15342
15343         DP(BNX2X_MSG_PTP, "Requested tx_type: %d, requested rx_filters = %d\n",
15344            config.tx_type, config.rx_filter);
15345
15346         if (config.flags) {
15347                 BNX2X_ERR("config.flags is reserved for future use\n");
15348                 return -EINVAL;
15349         }
15350
15351         bp->hwtstamp_ioctl_called = 1;
15352         bp->tx_type = config.tx_type;
15353         bp->rx_filter = config.rx_filter;
15354
15355         rc = bnx2x_configure_ptp_filters(bp);
15356         if (rc)
15357                 return rc;
15358
15359         config.rx_filter = bp->rx_filter;
15360
15361         return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
15362                 -EFAULT : 0;
15363 }
15364
15365 /* Configures HW for PTP */
15366 static int bnx2x_configure_ptp(struct bnx2x *bp)
15367 {
15368         int rc, port = BP_PORT(bp);
15369         u32 wb_data[2];
15370
15371         /* Reset PTP event detection rules - will be configured in the IOCTL */
15372         REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
15373                NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF);
15374         REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
15375                NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF);
15376         REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
15377                NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF);
15378         REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
15379                NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF);
15380
15381         /* Disable PTP packets to host - will be configured in the IOCTL */
15382         REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
15383                NIG_REG_P0_LLH_PTP_TO_HOST, 0x0);
15384
15385         /* Enable the PTP feature */
15386         REG_WR(bp, port ? NIG_REG_P1_PTP_EN :
15387                NIG_REG_P0_PTP_EN, 0x3F);
15388
15389         /* Enable the free-running counter */
15390         wb_data[0] = 0;
15391         wb_data[1] = 0;
15392         REG_WR_DMAE(bp, NIG_REG_TIMESYNC_GEN_REG + tsgen_ctrl, wb_data, 2);
15393
15394         /* Reset drift register (offset register is not reset) */
15395         rc = bnx2x_send_reset_timesync_ramrod(bp);
15396         if (rc) {
15397                 BNX2X_ERR("Failed to reset PHC drift register\n");
15398                 return -EFAULT;
15399         }
15400
15401         /* Reset possibly old timestamps */
15402         REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID :
15403                NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000);
15404         REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
15405                NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000);
15406
15407         return 0;
15408 }
15409
15410 /* Called during load to initialize PTP-related state */
15411 void bnx2x_init_ptp(struct bnx2x *bp)
15412 {
15413         int rc;
15414
15415         /* Configure PTP in HW */
15416         rc = bnx2x_configure_ptp(bp);
15417         if (rc) {
15418                 BNX2X_ERR("Stopping PTP initialization\n");
15419                 return;
15420         }
15421
15422         /* Init work queue for Tx timestamping */
15423         INIT_WORK(&bp->ptp_task, bnx2x_ptp_task);
15424
15425         /* Init cyclecounter and timecounter. This is done only on the first
15426          * load; if it were done on every load, a running PTP application
15427          * would fail across an unload/load cycle (e.g. an MTU change).
15428          */
15429         if (!bp->timecounter_init_done) {
15430                 bnx2x_init_cyclecounter(bp);
15431                 timecounter_init(&bp->timecounter, &bp->cyclecounter,
15432                                  ktime_to_ns(ktime_get_real()));
15433                 bp->timecounter_init_done = 1;
15434         }
15435
15436         DP(BNX2X_MSG_PTP, "PTP initialization ended successfully\n");
15437 }