/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/semaphore.h>
#include <linux/stringify.h>
#include <linux/vmalloc.h>

#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"
#include "bnx2x_vfpf.h"
#include "bnx2x_dcb.h"
#include "bnx2x_sp.h"

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION                                 \
        __stringify(BCM_5710_FW_MAJOR_VERSION) "."      \
        __stringify(BCM_5710_FW_MINOR_VERSION) "."      \
        __stringify(BCM_5710_FW_REVISION_VERSION) "."   \
        __stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1         "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H        "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2         "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] =
        "Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II "
                   "BCM57710/57711/57711E/"
                   "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
                   "57840/57840_MF Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);

int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues,
                 " Set number of queues (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
                                "(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");
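
/* Typical usage (hypothetical values, for illustration only):
 * "modprobe bnx2x num_queues=4 int_mode=2 debug=0x1" would force four
 * queues, MSI interrupts and a non-zero default msglevel. All of the
 * parameters above are plain module_param() integers read when the
 * driver loads.
 */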

struct workqueue_struct *bnx2x_wq;

struct bnx2x_mac_vals {
        u32 xmac_addr;
        u32 xmac_val;
        u32 emac_addr;
        u32 emac_val;
        u32 umac_addr;
        u32 umac_val;
        u32 bmac_addr;
        u32 bmac_val[2];
};

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711,
        BCM57711E,
        BCM57712,
        BCM57712_MF,
        BCM57712_VF,
        BCM57800,
        BCM57800_MF,
        BCM57800_VF,
        BCM57810,
        BCM57810_MF,
        BCM57810_VF,
        BCM57840_4_10,
        BCM57840_2_20,
        BCM57840_MF,
        BCM57840_VF,
        BCM57811,
        BCM57811_MF,
        BCM57840_O,
        BCM57840_MFO,
        BCM57811_VF
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] = {
        [BCM57710]      = { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
        [BCM57711]      = { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
        [BCM57711E]     = { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
        [BCM57712]      = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
        [BCM57712_MF]   = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
        [BCM57712_VF]   = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Virtual Function" },
        [BCM57800]      = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
        [BCM57800_MF]   = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
        [BCM57800_VF]   = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Virtual Function" },
        [BCM57810]      = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
        [BCM57810_MF]   = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
        [BCM57810_VF]   = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Virtual Function" },
        [BCM57840_4_10] = { "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" },
        [BCM57840_2_20] = { "Broadcom NetXtreme II BCM57840 20 Gigabit Ethernet" },
        [BCM57840_MF]   = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
        [BCM57840_VF]   = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" },
        [BCM57811]      = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet" },
        [BCM57811_MF]   = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function" },
        [BCM57840_O]    = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
        [BCM57840_MFO]  = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
        [BCM57811_VF]   = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Virtual Function" }
};

#ifndef PCI_DEVICE_ID_NX2_57710
#define PCI_DEVICE_ID_NX2_57710         CHIP_NUM_57710
#endif
#ifndef PCI_DEVICE_ID_NX2_57711
#define PCI_DEVICE_ID_NX2_57711         CHIP_NUM_57711
#endif
#ifndef PCI_DEVICE_ID_NX2_57711E
#define PCI_DEVICE_ID_NX2_57711E        CHIP_NUM_57711E
#endif
#ifndef PCI_DEVICE_ID_NX2_57712
#define PCI_DEVICE_ID_NX2_57712         CHIP_NUM_57712
#endif
#ifndef PCI_DEVICE_ID_NX2_57712_MF
#define PCI_DEVICE_ID_NX2_57712_MF      CHIP_NUM_57712_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57712_VF
#define PCI_DEVICE_ID_NX2_57712_VF      CHIP_NUM_57712_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57800
#define PCI_DEVICE_ID_NX2_57800         CHIP_NUM_57800
#endif
#ifndef PCI_DEVICE_ID_NX2_57800_MF
#define PCI_DEVICE_ID_NX2_57800_MF      CHIP_NUM_57800_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57800_VF
#define PCI_DEVICE_ID_NX2_57800_VF      CHIP_NUM_57800_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57810
#define PCI_DEVICE_ID_NX2_57810         CHIP_NUM_57810
#endif
#ifndef PCI_DEVICE_ID_NX2_57810_MF
#define PCI_DEVICE_ID_NX2_57810_MF      CHIP_NUM_57810_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_O
#define PCI_DEVICE_ID_NX2_57840_O       CHIP_NUM_57840_OBSOLETE
#endif
#ifndef PCI_DEVICE_ID_NX2_57810_VF
#define PCI_DEVICE_ID_NX2_57810_VF      CHIP_NUM_57810_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_4_10
#define PCI_DEVICE_ID_NX2_57840_4_10    CHIP_NUM_57840_4_10
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_2_20
#define PCI_DEVICE_ID_NX2_57840_2_20    CHIP_NUM_57840_2_20
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_MFO
#define PCI_DEVICE_ID_NX2_57840_MFO     CHIP_NUM_57840_MF_OBSOLETE
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_MF
#define PCI_DEVICE_ID_NX2_57840_MF      CHIP_NUM_57840_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_VF
#define PCI_DEVICE_ID_NX2_57840_VF      CHIP_NUM_57840_VF
#endif
#ifndef PCI_DEVICE_ID_NX2_57811
#define PCI_DEVICE_ID_NX2_57811         CHIP_NUM_57811
#endif
#ifndef PCI_DEVICE_ID_NX2_57811_MF
#define PCI_DEVICE_ID_NX2_57811_MF      CHIP_NUM_57811_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57811_VF
#define PCI_DEVICE_ID_NX2_57811_VF      CHIP_NUM_57811_VF
#endif

static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_VF), BCM57712_VF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_VF), BCM57800_VF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_VF), BCM57810_VF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_VF), BCM57811_VF },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/* Global resources for unloading a previously loaded device */
#define BNX2X_PREV_WAIT_NEEDED 1
static DEFINE_SEMAPHORE(bnx2x_prev_sem);
static LIST_HEAD(bnx2x_prev_list);

/****************************************************************************
* General service functions
****************************************************************************/

static void __storm_memset_dma_mapping(struct bnx2x *bp,
                                       u32 addr, dma_addr_t mapping)
{
        REG_WR(bp, addr, U64_LO(mapping));
        REG_WR(bp, addr + 4, U64_HI(mapping));
}
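
/* The storm memories are 32 bits wide, so a 64-bit DMA mapping is
 * programmed as two consecutive dwords: the low half at 'addr' and the
 * high half at 'addr + 4'. For example, a mapping of 0x0000000123456780
 * is written as 0x23456780 followed by 0x00000001.
 */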

static void storm_memset_spq_addr(struct bnx2x *bp,
                                  dma_addr_t mapping, u16 abs_fid)
{
        u32 addr = XSEM_REG_FAST_MEMORY +
                        XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

        __storm_memset_dma_mapping(bp, addr, mapping);
}

static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
                                  u16 pf_id)
{
        REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
        REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
        REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
        REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
                pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
                                 u8 enable)
{
        REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
        REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
        REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
        REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
                enable);
}

static void storm_memset_eq_data(struct bnx2x *bp,
                                 struct event_ring_data *eq_data,
                                 u16 pfid)
{
        size_t size = sizeof(struct event_ring_data);

        u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

        __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
                                 u16 pfid)
{
        u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);

        REG_WR16(bp, addr, eq_prod);
}

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}
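
/* The two helpers above implement indirect GRC access through PCI
 * configuration space: the target register offset is latched into
 * PCICFG_GRC_ADDRESS and the data is then moved through PCICFG_GRC_DATA.
 * Writing PCICFG_VENDOR_ID_OFFSET back afterwards leaves the window
 * parked on a harmless offset so a stray config cycle cannot touch a
 * live device register.
 */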

#define DMAE_DP_SRC_GRC         "grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI         "pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC         "grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI         "pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE        "dst_addr [none]"

static void bnx2x_dp_dmae(struct bnx2x *bp,
                          struct dmae_command *dmae, int msglvl)
{
        u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
        int i;

        switch (dmae->opcode & DMAE_COMMAND_DST) {
        case DMAE_CMD_DST_PCI:
                if (src_type == DMAE_CMD_SRC_PCI)
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
                           "comp_addr [%x:%08x], comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
                           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
                           dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                else
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src [%08x], len [%d*4], dst [%x:%08x]\n"
                           "comp_addr [%x:%08x], comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_lo >> 2,
                           dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
                           dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                break;
        case DMAE_CMD_DST_GRC:
                if (src_type == DMAE_CMD_SRC_PCI)
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
                           "comp_addr [%x:%08x], comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
                           dmae->len, dmae->dst_addr_lo >> 2,
                           dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                else
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src [%08x], len [%d*4], dst [%08x]\n"
                           "comp_addr [%x:%08x], comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_lo >> 2,
                           dmae->len, dmae->dst_addr_lo >> 2,
                           dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                break;
        default:
                if (src_type == DMAE_CMD_SRC_PCI)
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src_addr [%x:%08x]  len [%d * 4]  dst_addr [none]\n"
                           "comp_addr [%x:%08x]  comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
                           dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                else
                        DP(msglvl, "DMAE: opcode 0x%08x\n"
                           "src_addr [%08x]  len [%d * 4]  dst_addr [none]\n"
                           "comp_addr [%x:%08x]  comp_val 0x%08x\n",
                           dmae->opcode, dmae->src_addr_lo >> 2,
                           dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
                           dmae->comp_val);
                break;
        }

        for (i = 0; i < (sizeof(struct dmae_command)/4); i++)
                DP(msglvl, "DMAE RAW [%02d]: 0x%08x\n",
                   i, *(((u32 *)dmae) + i));
}

/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++)
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
{
        return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
                         DMAE_CMD_C_ENABLE);
}

u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
{
        return opcode & ~DMAE_CMD_SRC_RESET;
}

u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
                      bool with_comp, u8 comp_type)
{
        u32 opcode = 0;

        opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
                   (dst_type << DMAE_COMMAND_DST_SHIFT));

        opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

        opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
        opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
                   (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
        opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
        opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
        opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
        if (with_comp)
                opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
        return opcode;
}
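
/* Sketch of the resulting opcode: source/destination type, port, VN,
 * error policy and endianness are OR-ed into one 32-bit command word
 * (the exact bit positions live in the DMAE_COMMAND_* definitions).
 * For example, the call used by bnx2x_prep_dmae_with_comp() below,
 *
 *      opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
 *                                 true, DMAE_COMP_PCI);
 *
 * builds a PCI-to-GRC command that signals completion with a PCI-side
 * write-back.
 */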

void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
                               struct dmae_command *dmae,
                               u8 src_type, u8 dst_type)
{
        memset(dmae, 0, sizeof(struct dmae_command));

        /* set the opcode */
        dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
                                         true, DMAE_COMP_PCI);

        /* fill in the completion parameters */
        dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae->comp_val = DMAE_COMP_VAL;
}

/* issue a DMAE command over the init channel and wait for completion */
int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
{
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
        int rc = 0;

        bnx2x_dp_dmae(bp, dmae, BNX2X_MSG_DMAE);

        /* Lock the dmae channel. Disable BHs to prevent a deadlock,
         * since this code is called both from syscall context and from
         * the ndo_set_rx_mode() flow, which may run in BH context.
         */
        spin_lock_bh(&bp->dmae_lock);

        /* reset completion */
        *wb_comp = 0;

        /* post the command on the channel used for initializations */
        bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

        /* wait for completion */
        udelay(5);
        while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {

                if (!cnt ||
                    (bp->recovery_state != BNX2X_RECOVERY_DONE &&
                     bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
                        BNX2X_ERR("DMAE timeout!\n");
                        rc = DMAE_TIMEOUT;
                        goto unlock;
                }
                cnt--;
                udelay(50);
        }
        if (*wb_comp & DMAE_PCI_ERR_FLAG) {
                BNX2X_ERR("DMAE PCI error!\n");
                rc = DMAE_PCI_ERROR;
        }

unlock:
        spin_unlock_bh(&bp->dmae_lock);
        return rc;
}
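
/* Completion is detected by polling the write-back word: the device
 * DMA-writes DMAE_COMP_VAL (with DMAE_PCI_ERR_FLAG possibly set) into
 * wb_comp once the transfer finishes. With cnt = 4000 iterations of
 * udelay(50) the timeout is roughly 200 ms on real silicon, and 100x
 * longer (400000 iterations) on slow emulation/FPGA revisions.
 */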

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        int rc;
        struct dmae_command dmae;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                if (CHIP_IS_E1(bp))
                        bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                else
                        bnx2x_init_str_wr(bp, dst_addr, data, len32);
                return;
        }

        /* set opcode and fixed command fields */
        bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

        /* fill in addresses and len */
        dmae.src_addr_lo = U64_LO(dma_addr);
        dmae.src_addr_hi = U64_HI(dma_addr);
        dmae.dst_addr_lo = dst_addr >> 2;
        dmae.dst_addr_hi = 0;
        dmae.len = len32;

        /* issue the command and wait for completion */
        rc = bnx2x_issue_dmae_with_comp(bp, &dmae);
        if (rc) {
                BNX2X_ERR("DMAE returned failure %d\n", rc);
                bnx2x_panic();
        }
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        int rc;
        struct dmae_command dmae;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                if (CHIP_IS_E1(bp))
                        for (i = 0; i < len32; i++)
                                data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                else
                        for (i = 0; i < len32; i++)
                                data[i] = REG_RD(bp, src_addr + i*4);

                return;
        }

        /* set opcode and fixed command fields */
        bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

        /* fill in addresses and len */
        dmae.src_addr_lo = src_addr >> 2;
        dmae.src_addr_hi = 0;
        dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae.len = len32;

        /* issue the command and wait for completion */
        rc = bnx2x_issue_dmae_with_comp(bp, &dmae);
        if (rc) {
                BNX2X_ERR("DMAE returned failure %d\n", rc);
                bnx2x_panic();
        }
}

static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
                                      u32 addr, u32 len)
{
        int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
        int offset = 0;

        while (len > dmae_wr_max) {
                bnx2x_write_dmae(bp, phys_addr + offset,
                                 addr + offset, dmae_wr_max);
                offset += dmae_wr_max * 4;
                len -= dmae_wr_max;
        }

        bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}
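
/* Note the units in the loop above: len and dmae_wr_max count 32-bit
 * words while offset is a byte offset, hence "offset += dmae_wr_max * 4".
 * With a hypothetical write limit of 0x400 words, a 0x500-word buffer
 * would go out as one 0x400-word DMAE followed by one 0x100-word DMAE
 * starting 0x1000 bytes into the buffer.
 */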

static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}

void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
{
        u32 addr, val;
        u32 mark, offset;
        __be32 data[9];
        int word;
        u32 trace_shmem_base;

        if (BP_NOMCP(bp)) {
                BNX2X_ERR("NO MCP - can not dump\n");
                return;
        }
        netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
                (bp->common.bc_ver & 0xff0000) >> 16,
                (bp->common.bc_ver & 0xff00) >> 8,
                (bp->common.bc_ver & 0xff));

        val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
        if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
                BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);

        if (BP_PATH(bp) == 0)
                trace_shmem_base = bp->common.shmem_base;
        else
                trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
        addr = trace_shmem_base - 0x800;

        /* validate TRCB signature */
        mark = REG_RD(bp, addr);
        if (mark != MFW_TRACE_SIGNATURE) {
                BNX2X_ERR("Trace buffer signature is missing.\n");
                return;
        }

        /* read cyclic buffer pointer */
        addr += 4;
        mark = REG_RD(bp, addr);
        mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
                        + ((mark + 0x3) & ~0x3) - 0x08000000;
        printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);

        printk("%s", lvl);

        /* dump buffer after the mark */
        for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, offset + 4*word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }

        /* dump buffer before the mark */
        for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, offset + 4*word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }
        printk("%s" "end of fw dump\n", lvl);
}
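
/* MFW trace buffer layout assumed above: the TRCB control block sits
 * 0x800 bytes below the shmem base, begins with MFW_TRACE_SIGNATURE and
 * is followed by the cyclic write pointer. The dump is printed in two
 * passes: from the current mark to the end of the buffer, then from the
 * buffer start up to the mark, 32 bytes (8 dwords, NUL-terminated into
 * data[8]) at a time.
 */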

static void bnx2x_fw_dump(struct bnx2x *bp)
{
        bnx2x_fw_dump_lvl(bp, KERN_ERR);
}

static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        /* On E1 we must use only the PCI configuration space to disable
         * the MSI/MSI-X capability; it's forbidden to clear
         * IGU_PF_CONF_MSI_MSIX_EN in the HC block.
         */
        if (CHIP_IS_E1(bp)) {
                /* Since IGU_PF_CONF_MSI_MSIX_EN is still always on, use
                 * the mask register to prevent the HC from sending
                 * interrupts after we exit this function.
                 */
                REG_WR(bp, HC_REG_INT_MASK + port*4, 0);

                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        }

        DP(NETIF_MSG_IFDOWN,
           "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! Proper val not read from HC!\n");
}

static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
        u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

        val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
                 IGU_PF_CONF_INT_LINE_EN |
                 IGU_PF_CONF_ATTN_BIT_EN);

        DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
        if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
                BNX2X_ERR("BUG! Proper val not read from IGU!\n");
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
        if (bp->common.int_block == INT_BLOCK_HC)
                bnx2x_hc_int_disable(bp);
        else
                bnx2x_igu_int_disable(bp);
}

void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
{
        int i;
        u16 j;
        struct hc_sp_status_block_data sp_sb_data;
        int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
        u16 start = 0, end = 0;
        u8 cos;
#endif

        if (disable_int)
                bnx2x_int_disable(bp);

        bp->stats_state = STATS_STATE_DISABLED;
        bp->eth_stats.unrecoverable_error++;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        /* Indices */
        /* Common */
        BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)  spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
                  bp->def_idx, bp->def_att_idx, bp->attn_state,
                  bp->spq_prod_idx, bp->stats_counter);
        BNX2X_ERR("DSB: attn bits(0x%x)  ack(0x%x)  id(0x%x)  idx(0x%x)\n",
                  bp->def_status_blk->atten_status_block.attn_bits,
                  bp->def_status_blk->atten_status_block.attn_bits_ack,
                  bp->def_status_blk->atten_status_block.status_block_id,
                  bp->def_status_blk->atten_status_block.attn_bits_index);
        BNX2X_ERR("     def (");
        for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
                pr_cont("0x%x%s",
                        bp->def_status_blk->sp_sb.index_values[i],
                        (i == HC_SP_SB_MAX_INDICES - 1) ? ")  " : " ");

        for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
                *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
                        CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
                        i*sizeof(u32));

        pr_cont("igu_sb_id(0x%x)  igu_seg_id(0x%x) pf_id(0x%x)  vnic_id(0x%x)  vf_id(0x%x)  vf_valid (0x%x) state(0x%x)\n",
               sp_sb_data.igu_sb_id,
               sp_sb_data.igu_seg_id,
               sp_sb_data.p_func.pf_id,
               sp_sb_data.p_func.vnic_id,
               sp_sb_data.p_func.vf_id,
               sp_sb_data.p_func.vf_valid,
               sp_sb_data.state);

        for_each_eth_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];
                int loop;
                struct hc_status_block_data_e2 sb_data_e2;
                struct hc_status_block_data_e1x sb_data_e1x;
                struct hc_status_block_sm  *hc_sm_p =
                        CHIP_IS_E1x(bp) ?
                        sb_data_e1x.common.state_machine :
                        sb_data_e2.common.state_machine;
                struct hc_index_data *hc_index_p =
                        CHIP_IS_E1x(bp) ?
                        sb_data_e1x.index_data :
                        sb_data_e2.index_data;
                u8 data_size, cos;
                u32 *sb_data_p;
                struct bnx2x_fp_txdata txdata;

                /* Rx */
                BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)  rx_comp_prod(0x%x)  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
                          i, fp->rx_bd_prod, fp->rx_bd_cons,
                          fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)  fp_hc_idx(0x%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge,
                          le16_to_cpu(fp->fp_hc_idx));

                /* Tx */
                for_each_cos_in_tx_queue(fp, cos) {
                        txdata = *fp->txdata_ptr[cos];
                        BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)  *tx_cons_sb(0x%x)\n",
                                  i, txdata.tx_pkt_prod,
                                  txdata.tx_pkt_cons, txdata.tx_bd_prod,
                                  txdata.tx_bd_cons,
                                  le16_to_cpu(*txdata.tx_cons_sb));
                }

                loop = CHIP_IS_E1x(bp) ?
                        HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;

                /* host sb data */

                if (IS_FCOE_FP(fp))
                        continue;

                BNX2X_ERR("     run indexes (");
                for (j = 0; j < HC_SB_MAX_SM; j++)
                        pr_cont("0x%x%s",
                               fp->sb_running_index[j],
                               (j == HC_SB_MAX_SM - 1) ? ")" : " ");

                BNX2X_ERR("     indexes (");
                for (j = 0; j < loop; j++)
                        pr_cont("0x%x%s",
                               fp->sb_index_values[j],
                               (j == loop - 1) ? ")" : " ");

                /* fw sb data */
                data_size = CHIP_IS_E1x(bp) ?
                        sizeof(struct hc_status_block_data_e1x) :
                        sizeof(struct hc_status_block_data_e2);
                data_size /= sizeof(u32);
                sb_data_p = CHIP_IS_E1x(bp) ?
                        (u32 *)&sb_data_e1x :
                        (u32 *)&sb_data_e2;

                /* copy sb data in here */
                for (j = 0; j < data_size; j++)
                        *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
                                CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
                                j * sizeof(u32));

                if (!CHIP_IS_E1x(bp)) {
                        pr_cont("pf_id(0x%x)  vf_id(0x%x)  vf_valid(0x%x) vnic_id(0x%x)  same_igu_sb_1b(0x%x) state(0x%x)\n",
                                sb_data_e2.common.p_func.pf_id,
                                sb_data_e2.common.p_func.vf_id,
                                sb_data_e2.common.p_func.vf_valid,
                                sb_data_e2.common.p_func.vnic_id,
                                sb_data_e2.common.same_igu_sb_1b,
                                sb_data_e2.common.state);
                } else {
                        pr_cont("pf_id(0x%x)  vf_id(0x%x)  vf_valid(0x%x) vnic_id(0x%x)  same_igu_sb_1b(0x%x) state(0x%x)\n",
                                sb_data_e1x.common.p_func.pf_id,
                                sb_data_e1x.common.p_func.vf_id,
                                sb_data_e1x.common.p_func.vf_valid,
                                sb_data_e1x.common.p_func.vnic_id,
                                sb_data_e1x.common.same_igu_sb_1b,
                                sb_data_e1x.common.state);
                }

                /* SB_SMs data */
                for (j = 0; j < HC_SB_MAX_SM; j++) {
                        pr_cont("SM[%d] __flags (0x%x) igu_sb_id (0x%x)  igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n",
                                j, hc_sm_p[j].__flags,
                                hc_sm_p[j].igu_sb_id,
                                hc_sm_p[j].igu_seg_id,
                                hc_sm_p[j].time_to_expire,
                                hc_sm_p[j].timer_value);
                }

                /* Indices data */
                for (j = 0; j < loop; j++) {
                        pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j,
                               hc_index_p[j].flags,
                               hc_index_p[j].timeout);
                }
        }

#ifdef BNX2X_STOP_ON_ERROR

        /* event queue */
        BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
        for (i = 0; i < NUM_EQ_DESC; i++) {
                u32 *data = (u32 *)&bp->eq_ring[i].message.data;

                BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
                          i, bp->eq_ring[i].message.opcode,
                          bp->eq_ring[i].message.error);
                BNX2X_ERR("data: %x %x %x\n", data[0], data[1], data[2]);
        }

        /* Rings */
        /* Rx */
        for_each_valid_rx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j != end; j = RX_BD(j + 1)) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  i, j, rx_bd[1], rx_bd[0], sw_bd->data);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j != end; j = RX_SGE(j + 1)) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  i, j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j != end; j = RCQ_BD(j + 1)) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
                                  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        /* Tx */
        for_each_valid_tx_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                for_each_cos_in_tx_queue(fp, cos) {
                        struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];

                        start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
                        end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
                        for (j = start; j != end; j = TX_BD(j + 1)) {
                                struct sw_tx_bd *sw_bd =
                                        &txdata->tx_buf_ring[j];

                                BNX2X_ERR("fp%d: txdata %d, packet[%x]=[%p,%x]\n",
                                          i, cos, j, sw_bd->skb,
                                          sw_bd->first_bd);
                        }

                        start = TX_BD(txdata->tx_bd_cons - 10);
                        end = TX_BD(txdata->tx_bd_cons + 254);
                        for (j = start; j != end; j = TX_BD(j + 1)) {
                                u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];

                                BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n",
                                          i, cos, j, tx_bd[0], tx_bd[1],
                                          tx_bd[2], tx_bd[3]);
                        }
                }
        }
#endif
        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

/*
 * FLR Support for E2
 *
 * bnx2x_pf_flr_clnup() is called during nic_load in the per-function HW
 * initialization.
 */
#define FLR_WAIT_USEC           10000   /* 10 milliseconds */
#define FLR_WAIT_INTERVAL       50      /* usec */
#define FLR_POLL_CNT            (FLR_WAIT_USEC/FLR_WAIT_INTERVAL) /* 200 */

struct pbf_pN_buf_regs {
        int pN;
        u32 init_crd;
        u32 crd;
        u32 crd_freed;
};

struct pbf_pN_cmd_regs {
        int pN;
        u32 lines_occup;
        u32 lines_freed;
};

static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
                                     struct pbf_pN_buf_regs *regs,
                                     u32 poll_count)
{
        u32 init_crd, crd, crd_start, crd_freed, crd_freed_start;
        u32 cur_cnt = poll_count;

        crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed);
        crd = crd_start = REG_RD(bp, regs->crd);
        init_crd = REG_RD(bp, regs->init_crd);

        DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
        DP(BNX2X_MSG_SP, "CREDIT[%d]      : s:%x\n", regs->pN, crd);
        DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);

        while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
               (init_crd - crd_start))) {
                if (cur_cnt--) {
                        udelay(FLR_WAIT_INTERVAL);
                        crd = REG_RD(bp, regs->crd);
                        crd_freed = REG_RD(bp, regs->crd_freed);
                } else {
                        DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n",
                           regs->pN);
                        DP(BNX2X_MSG_SP, "CREDIT[%d]      : c:%x\n",
                           regs->pN, crd);
                        DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n",
                           regs->pN, crd_freed);
                        break;
                }
        }
        DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
           poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}

static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
                                     struct pbf_pN_cmd_regs *regs,
                                     u32 poll_count)
{
        u32 occup, to_free, freed, freed_start;
        u32 cur_cnt = poll_count;

        occup = to_free = REG_RD(bp, regs->lines_occup);
        freed = freed_start = REG_RD(bp, regs->lines_freed);

        DP(BNX2X_MSG_SP, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
        DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);

        while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
                if (cur_cnt--) {
                        udelay(FLR_WAIT_INTERVAL);
                        occup = REG_RD(bp, regs->lines_occup);
                        freed = REG_RD(bp, regs->lines_freed);
                } else {
                        DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n",
                           regs->pN);
                        DP(BNX2X_MSG_SP, "OCCUPANCY[%d]   : s:%x\n",
                           regs->pN, occup);
                        DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n",
                           regs->pN, freed);
                        break;
                }
        }
        DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
           poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}

static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
                                    u32 expected, u32 poll_count)
{
        u32 cur_cnt = poll_count;
        u32 val;

        while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
                udelay(FLR_WAIT_INTERVAL);

        return val;
}

int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
                                    char *msg, u32 poll_cnt)
{
        u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);

        if (val != 0) {
                BNX2X_ERR("%s usage count=%d\n", msg, val);
                return 1;
        }
        return 0;
}

/* Common routines with VF FLR cleanup */
u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
{
        /* adjust polling timeout */
        if (CHIP_REV_IS_EMUL(bp))
                return FLR_POLL_CNT * 2000;

        if (CHIP_REV_IS_FPGA(bp))
                return FLR_POLL_CNT * 120;

        return FLR_POLL_CNT;
}

void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
{
        struct pbf_pN_cmd_regs cmd_regs[] = {
                {0, (CHIP_IS_E3B0(bp)) ?
                        PBF_REG_TQ_OCCUPANCY_Q0 :
                        PBF_REG_P0_TQ_OCCUPANCY,
                    (CHIP_IS_E3B0(bp)) ?
                        PBF_REG_TQ_LINES_FREED_CNT_Q0 :
                        PBF_REG_P0_TQ_LINES_FREED_CNT},
                {1, (CHIP_IS_E3B0(bp)) ?
                        PBF_REG_TQ_OCCUPANCY_Q1 :
                        PBF_REG_P1_TQ_OCCUPANCY,
                    (CHIP_IS_E3B0(bp)) ?
                        PBF_REG_TQ_LINES_FREED_CNT_Q1 :
                        PBF_REG_P1_TQ_LINES_FREED_CNT},
                {4, (CHIP_IS_E3B0(bp)) ?
                        PBF_REG_TQ_OCCUPANCY_LB_Q :
                        PBF_REG_P4_TQ_OCCUPANCY,
                    (CHIP_IS_E3B0(bp)) ?
                        PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
                        PBF_REG_P4_TQ_LINES_FREED_CNT}
        };

        struct pbf_pN_buf_regs buf_regs[] = {
                {0, (CHIP_IS_E3B0(bp)) ?
                        PBF_REG_INIT_CRD_Q0 :
                        PBF_REG_P0_INIT_CRD,
                    (CHIP_IS_E3B0(bp)) ?
                        PBF_REG_CREDIT_Q0 :
                        PBF_REG_P0_CREDIT,
                    (CHIP_IS_E3B0(bp)) ?
                        PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
                        PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
                {1, (CHIP_IS_E3B0(bp)) ?
                        PBF_REG_INIT_CRD_Q1 :
                        PBF_REG_P1_INIT_CRD,
                    (CHIP_IS_E3B0(bp)) ?
                        PBF_REG_CREDIT_Q1 :
                        PBF_REG_P1_CREDIT,
                    (CHIP_IS_E3B0(bp)) ?
                        PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
                        PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
                {4, (CHIP_IS_E3B0(bp)) ?
                        PBF_REG_INIT_CRD_LB_Q :
                        PBF_REG_P4_INIT_CRD,
                    (CHIP_IS_E3B0(bp)) ?
                        PBF_REG_CREDIT_LB_Q :
                        PBF_REG_P4_CREDIT,
                    (CHIP_IS_E3B0(bp)) ?
                        PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
                        PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
        };

        int i;

        /* Verify the command queues are flushed P0, P1, P4 */
        for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
                bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);

        /* Verify the transmission buffers are flushed P0, P1, P4 */
        for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
                bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
}
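
/* On E3B0 the PBF counters moved from per-port register names (P0/P1/P4)
 * to per-queue ones (Q0/Q1/LB_Q, the latter presumably the loopback
 * queue), which is all the CHIP_IS_E3B0() ternaries above select. The
 * flush criterion is the same either way: occupancy drains to zero and
 * the freed counters advance by the expected amount.
 */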

#define OP_GEN_PARAM(param) \
        (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)

#define OP_GEN_TYPE(type) \
        (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)

#define OP_GEN_AGG_VECT(index) \
        (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)

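/* bnx2x_send_final_clnup() below composes these three fields plus the
 * aggregated-vector-valid bit into a single "operation generator"
 * command; writing it to XSDM_REG_OPERATION_GEN asks the firmware to
 * signal final-cleanup completion through the CSTORM completion word
 * that is then polled.
 */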
1319 int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
1320 {
1321         u32 op_gen_command = 0;
1322         u32 comp_addr = BAR_CSTRORM_INTMEM +
1323                         CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
1324         int ret = 0;
1325
1326         if (REG_RD(bp, comp_addr)) {
1327                 BNX2X_ERR("Cleanup complete was not 0 before sending\n");
1328                 return 1;
1329         }
1330
1331         op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
1332         op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
1333         op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
1334         op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
1335
1336         DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
1337         REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen_command);
1338
1339         if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
1340                 BNX2X_ERR("FW final cleanup did not succeed\n");
1341                 DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n",
1342                    (REG_RD(bp, comp_addr)));
1343                 bnx2x_panic();
1344                 return 1;
1345         }
1346         /* Zero completion for next FLR */
1347         REG_WR(bp, comp_addr, 0);
1348
1349         return ret;
1350 }
1351
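     /* Returns non-zero while the device still reports pending PCIe
      * transactions (the Transactions Pending bit of the PCIe Device Status
      * register).
      */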
1352 u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
1353 {
1354         u16 status;
1355
1356         pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
1357         return status & PCI_EXP_DEVSTA_TRPND;
1358 }
1359
1360 /* PF FLR specific routines */
1362 static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
1363 {
1364         /* wait for CFC PF usage-counter to zero (includes all the VFs) */
1365         if (bnx2x_flr_clnup_poll_hw_counter(bp,
1366                         CFC_REG_NUM_LCIDS_INSIDE_PF,
1367                         "CFC PF usage counter timed out",
1368                         poll_cnt))
1369                 return 1;
1370
1371         /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
1372         if (bnx2x_flr_clnup_poll_hw_counter(bp,
1373                         DORQ_REG_PF_USAGE_CNT,
1374                         "DQ PF usage counter timed out",
1375                         poll_cnt))
1376                 return 1;
1377
1378         /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
1379         if (bnx2x_flr_clnup_poll_hw_counter(bp,
1380                         QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp),
1381                         "QM PF usage counter timed out",
1382                         poll_cnt))
1383                 return 1;
1384
1385         /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
1386         if (bnx2x_flr_clnup_poll_hw_counter(bp,
1387                         TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp),
1388                         "Timers VNIC usage counter timed out",
1389                         poll_cnt))
1390                 return 1;
1391         if (bnx2x_flr_clnup_poll_hw_counter(bp,
1392                         TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp),
1393                         "Timers NUM_SCANS usage counter timed out",
1394                         poll_cnt))
1395                 return 1;
1396
1397         /* Wait for the DMAE PF usage counter to zero */
1398         if (bnx2x_flr_clnup_poll_hw_counter(bp,
1399                         dmae_reg_go_c[INIT_DMAE_C(bp)],
1400                         "DMAE command register timed out",
1401                         poll_cnt))
1402                 return 1;
1403
1404         return 0;
1405 }
1406
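     /* Dump (via DP) the per-PF enable/disable and masking registers; used
      * below purely as a debug aid after the FLR cleanup flow.
      */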
1407 static void bnx2x_hw_enable_status(struct bnx2x *bp)
1408 {
1409         u32 val;
1410
1411         val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF);
1412         DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
1413
1414         val = REG_RD(bp, PBF_REG_DISABLE_PF);
1415         DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val);
1416
1417         val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN);
1418         DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
1419
1420         val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN);
1421         DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
1422
1423         val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
1424         DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
1425
1426         val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
1427         DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
1428
1429         val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
1430         DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
1431
1432         val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
1433         DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
1434            val);
1435 }
1436
1437 static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
1438 {
1439         u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);
1440
1441         DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp));
1442
1443         /* Re-enable PF target read access */
1444         REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
1445
1446         /* Poll HW usage counters */
1447         DP(BNX2X_MSG_SP, "Polling usage counters\n");
1448         if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
1449                 return -EBUSY;
1450
1451         /* Zero the igu 'trailing edge' and 'leading edge' */
1452
1453         /* Send the FW cleanup command */
1454         if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt))
1455                 return -EBUSY;
1456
1457         /* ATC cleanup */
1458
1459         /* Verify TX hw is flushed */
1460         bnx2x_tx_hw_flushed(bp, poll_cnt);
1461
1462         /* Wait 100ms (not adjusted according to platform) */
1463         msleep(100);
1464
1465         /* Verify no pending pci transactions */
1466         if (bnx2x_is_pcie_pending(bp->pdev))
1467                 BNX2X_ERR("PCIE Transactions still pending\n");
1468
1469         /* Debug */
1470         bnx2x_hw_enable_status(bp);
1471
1472         /*
1473          * Master enable - must be set here due to the WB DMAE writes done
1474          * earlier; it is re-initialized as part of the regular function init
1475          */
1476         REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
1477
1478         return 0;
1479 }
1480
1481 static void bnx2x_hc_int_enable(struct bnx2x *bp)
1482 {
1483         int port = BP_PORT(bp);
1484         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
1485         u32 val = REG_RD(bp, addr);
1486         bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
1487         bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
1488         bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
1489
1490         if (msix) {
1491                 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1492                          HC_CONFIG_0_REG_INT_LINE_EN_0);
1493                 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1494                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1495                 if (single_msix)
1496                         val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
1497         } else if (msi) {
1498                 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
1499                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1500                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1501                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1502         } else {
1503                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1504                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1505                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
1506                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1507
1508                 if (!CHIP_IS_E1(bp)) {
1509                         DP(NETIF_MSG_IFUP,
1510                            "write %x to HC %d (addr 0x%x)\n", val, port, addr);
1511
1512                         REG_WR(bp, addr, val);
1513
1514                         val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
1515                 }
1516         }
1517
1518         if (CHIP_IS_E1(bp))
1519                 REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);
1520
1521         DP(NETIF_MSG_IFUP,
1522            "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr,
1523            (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1524
1525         REG_WR(bp, addr, val);
1526         /*
1527          * Ensure that HC_CONFIG is written before leading/trailing edge config
1528          */
1529         mmiowb();
1530         barrier();
1531
1532         if (!CHIP_IS_E1(bp)) {
1533                 /* init leading/trailing edge */
1534                 if (IS_MF(bp)) {
1535                         val = (0xee0f | (1 << (BP_VN(bp) + 4)));
1536                         if (bp->port.pmf)
1537                                 /* enable nig and gpio3 attention */
1538                                 val |= 0x1100;
1539                 } else
1540                         val = 0xffff;
1541
1542                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1543                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
1544         }
1545
1546         /* Make sure that interrupts are indeed enabled from here on */
1547         mmiowb();
1548 }
1549
1550 static void bnx2x_igu_int_enable(struct bnx2x *bp)
1551 {
1552         u32 val;
1553         bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
1554         bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
1555         bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
1556
1557         val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1558
1559         if (msix) {
1560                 val &= ~(IGU_PF_CONF_INT_LINE_EN |
1561                          IGU_PF_CONF_SINGLE_ISR_EN);
1562                 val |= (IGU_PF_CONF_MSI_MSIX_EN |
1563                         IGU_PF_CONF_ATTN_BIT_EN);
1564
1565                 if (single_msix)
1566                         val |= IGU_PF_CONF_SINGLE_ISR_EN;
1567         } else if (msi) {
1568                 val &= ~IGU_PF_CONF_INT_LINE_EN;
1569                 val |= (IGU_PF_CONF_MSI_MSIX_EN |
1570                         IGU_PF_CONF_ATTN_BIT_EN |
1571                         IGU_PF_CONF_SINGLE_ISR_EN);
1572         } else {
1573                 val &= ~IGU_PF_CONF_MSI_MSIX_EN;
1574                 val |= (IGU_PF_CONF_INT_LINE_EN |
1575                         IGU_PF_CONF_ATTN_BIT_EN |
1576                         IGU_PF_CONF_SINGLE_ISR_EN);
1577         }
1578
1579         /* Clear previous status - need to configure IGU prior to ack */
1580         if ((!msix) || single_msix) {
1581                 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1582                 bnx2x_ack_int(bp);
1583         }
1584
1585         val |= IGU_PF_CONF_FUNC_EN;
1586
1587         DP(NETIF_MSG_IFUP, "write 0x%x to IGU  mode %s\n",
1588            val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1589
1590         REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1591
1592         if (val & IGU_PF_CONF_INT_LINE_EN)
1593                 pci_intx(bp->pdev, true);
1594
1595         barrier();
1596
1597         /* init leading/trailing edge */
1598         if (IS_MF(bp)) {
1599                 val = (0xee0f | (1 << (BP_VN(bp) + 4)));
1600                 if (bp->port.pmf)
1601                         /* enable nig and gpio3 attention */
1602                         val |= 0x1100;
1603         } else
1604                 val = 0xffff;
1605
1606         REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
1607         REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
1608
1609         /* Make sure that interrupts are indeed enabled from here on */
1610         mmiowb();
1611 }
1612
1613 void bnx2x_int_enable(struct bnx2x *bp)
1614 {
1615         if (bp->common.int_block == INT_BLOCK_HC)
1616                 bnx2x_hc_int_enable(bp);
1617         else
1618                 bnx2x_igu_int_enable(bp);
1619 }
1620
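     /* Optionally mask interrupts in HW, then wait for every in-flight ISR to
      * finish - with MSI-X that is vector 0 (slowpath), the CNIC vector when
      * present, and one vector per eth queue - and make sure the slowpath
      * tasks are not left running.
      */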
1621 void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
1622 {
1623         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1624         int i, offset;
1625
1626         if (disable_hw)
1627                 /* prevent the HW from sending interrupts */
1628                 bnx2x_int_disable(bp);
1629
1630         /* make sure all ISRs are done */
1631         if (msix) {
1632                 synchronize_irq(bp->msix_table[0].vector);
1633                 offset = 1;
1634                 if (CNIC_SUPPORT(bp))
1635                         offset++;
1636                 for_each_eth_queue(bp, i)
1637                         synchronize_irq(bp->msix_table[offset++].vector);
1638         } else
1639                 synchronize_irq(bp->pdev->irq);
1640
1641         /* make sure sp_task is not running */
1642         cancel_delayed_work(&bp->sp_task);
1643         cancel_delayed_work(&bp->period_task);
1644         flush_workqueue(bnx2x_wq);
1645 }
1646
1647 /* fast path */
1648
1649 /*
1650  * General service functions
1651  */
1652
1653 /* Return true if the lock was successfully acquired */
1654 static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
1655 {
1656         u32 lock_status;
1657         u32 resource_bit = (1 << resource);
1658         int func = BP_FUNC(bp);
1659         u32 hw_lock_control_reg;
1660
1661         DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1662            "Trying to take a lock on resource %d\n", resource);
1663
1664         /* Validating that the resource is within range */
1665         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1666                 DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1667                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1668                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1669                 return false;
1670         }
1671
1672         if (func <= 5)
1673                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1674         else
1675                 hw_lock_control_reg =
1676                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1677
1678         /* Try to acquire the lock */
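             /* The lock-control register is per-function (note the func*8
              * offset above): writing the resource bit to control_reg + 4
              * requests the lock, and the bit appears in the read-back of the
              * base register only if this function now owns it. Release
              * writes the same bit to the base register (see
              * bnx2x_release_hw_lock()).
              */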
1679         REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1680         lock_status = REG_RD(bp, hw_lock_control_reg);
1681         if (lock_status & resource_bit)
1682                 return true;
1683
1684         DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1685            "Failed to get a lock on resource %d\n", resource);
1686         return false;
1687 }
1688
1689 /**
1690  * bnx2x_get_leader_lock_resource - get the recovery leader resource id
1691  *
1692  * @bp: driver handle
1693  *
1694  * Returns the recovery leader resource id according to the engine this function
1695  * belongs to. Currently only 2 engines are supported.
1696  */
1697 static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
1698 {
1699         if (BP_PATH(bp))
1700                 return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
1701         else
1702                 return HW_LOCK_RESOURCE_RECOVERY_LEADER_0;
1703 }
1704
1705 /**
1706  * bnx2x_trylock_leader_lock - try to acquire a leader lock.
1707  *
1708  * @bp: driver handle
1709  *
1710  * Tries to acquire a leader lock for current engine.
1711  */
1712 static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
1713 {
1714         return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
1715 }
1716
1717 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
1718
1719 /* schedule the sp task and mark that interrupt occurred (runs from ISR) */
1720 static int bnx2x_schedule_sp_task(struct bnx2x *bp)
1721 {
1722         /* Set the interrupt occurred bit for the sp-task to recognize it
1723          * must ack the interrupt and transition according to the IGU
1724          * state machine.
1725          */
1726         atomic_set(&bp->interrupt_occurred, 1);
1727
1728         /* The sp_task must execute only after this bit
1729          * is set, otherwise we will get out of sync and miss all
1730          * further interrupts. Hence, the barrier.
1731          */
1732         smp_wmb();
1733
1734         /* schedule sp_task to workqueue */
1735         return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1736 }
1737
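     /* Handle a slowpath (ramrod) completion CQE taken off a fastpath ring:
      * translate the FW command into the matching queue-state-machine event,
      * complete it on the queue object, then return the credit to
      * bp->cq_spq_left.
      */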
1738 void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
1739 {
1740         struct bnx2x *bp = fp->bp;
1741         int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1742         int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1743         enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
1744         struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
1745
1746         DP(BNX2X_MSG_SP,
1747            "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
1748            fp->index, cid, command, bp->state,
1749            rr_cqe->ramrod_cqe.ramrod_type);
1750
1751         /* If cid is within VF range, replace the slowpath object with the
1752          * one corresponding to this VF
1753          */
1754         if (cid >= BNX2X_FIRST_VF_CID  &&
1755             cid < BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)
1756                 bnx2x_iov_set_queue_sp_obj(bp, cid, &q_obj);
1757
1758         switch (command) {
1759         case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
1760                 DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
1761                 drv_cmd = BNX2X_Q_CMD_UPDATE;
1762                 break;
1763
1764         case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
1765                 DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid);
1766                 drv_cmd = BNX2X_Q_CMD_SETUP;
1767                 break;
1768
1769         case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
1770                 DP(BNX2X_MSG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
1771                 drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
1772                 break;
1773
1774         case (RAMROD_CMD_ID_ETH_HALT):
1775                 DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid);
1776                 drv_cmd = BNX2X_Q_CMD_HALT;
1777                 break;
1778
1779         case (RAMROD_CMD_ID_ETH_TERMINATE):
1780                 DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid);
1781                 drv_cmd = BNX2X_Q_CMD_TERMINATE;
1782                 break;
1783
1784         case (RAMROD_CMD_ID_ETH_EMPTY):
1785                 DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid);
1786                 drv_cmd = BNX2X_Q_CMD_EMPTY;
1787                 break;
1788
1789         default:
1790                 BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
1791                           command, fp->index);
1792                 return;
1793         }
1794
1795         if ((drv_cmd != BNX2X_Q_CMD_MAX) &&
1796             q_obj->complete_cmd(bp, q_obj, drv_cmd))
1797                 /* q_obj->complete_cmd() failure means that this was
1798                  * an unexpected completion.
1799                  *
1800                  * In this case we don't want to increase bp->cq_spq_left
1801                  * because apparently we haven't sent this command in the
1802                  * first place.
1803                  */
1804 #ifdef BNX2X_STOP_ON_ERROR
1805                 bnx2x_panic();
1806 #else
1807                 return;
1808 #endif
1809         /* SRIOV: reschedule any 'in_progress' operations */
1810         bnx2x_iov_sp_event(bp, cid, true);
1811
1812         smp_mb__before_atomic_inc();
1813         atomic_inc(&bp->cq_spq_left);
1814         /* push the change in bp->cq_spq_left towards memory */
1815         smp_mb__after_atomic_inc();
1816
1817         DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));
1818
1819         if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) &&
1820             (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) {
1821                 /* if the Q update ramrod is completed for the last Q in the
1822                  * AFEX VIF set flow, then ACK MCP at the end
1823                  *
1824                  * mark the pending ACK-to-MCP bit before clearing the
1825                  * update-pending bit, to prevent the case where both bits
1826                  * are cleared. At the end of load/unload the driver checks
1827                  * that sp_state is cleared, and this ordering prevents
1828                  * races.
1829                  */
1830                 smp_mb__before_clear_bit();
1831                 set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
1832                 wmb();
1833                 clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
1834                 smp_mb__after_clear_bit();
1835
1836                 /* schedule the sp task as mcp ack is required */
1837                 bnx2x_schedule_sp_task(bp);
1838         }
1839
1840         return;
1841 }
1842
1843 irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1844 {
1845         struct bnx2x *bp = netdev_priv(dev_instance);
1846         u16 status = bnx2x_ack_int(bp);
1847         u16 mask;
1848         int i;
1849         u8 cos;
1850
1851         /* Return here if interrupt is shared and it's not for us */
1852         if (unlikely(status == 0)) {
1853                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1854                 return IRQ_NONE;
1855         }
1856         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1857
1858 #ifdef BNX2X_STOP_ON_ERROR
1859         if (unlikely(bp->panic))
1860                 return IRQ_HANDLED;
1861 #endif
1862
1863         for_each_eth_queue(bp, i) {
1864                 struct bnx2x_fastpath *fp = &bp->fp[i];
1865
1866                 mask = 0x2 << (fp->index + CNIC_SUPPORT(bp));
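                     /* Status bit 0 is the slowpath (default SB), bit 1 is
                      * CNIC when supported; eth queue i owns status bit
                      * (1 + CNIC_SUPPORT(bp) + i) - hence the 0x2 << ... above.
                      */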
1867                 if (status & mask) {
1868                         /* Handle Rx or Tx according to SB id */
1869                         for_each_cos_in_tx_queue(fp, cos)
1870                                 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1871                         prefetch(&fp->sb_running_index[SM_RX_ID]);
1872                         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1873                         status &= ~mask;
1874                 }
1875         }
1876
1877         if (CNIC_SUPPORT(bp)) {
1878                 mask = 0x2;
1879                 if (status & (mask | 0x1)) {
1880                         struct cnic_ops *c_ops = NULL;
1881
1882                         rcu_read_lock();
1883                         c_ops = rcu_dereference(bp->cnic_ops);
1884                         if (c_ops && (bp->cnic_eth_dev.drv_state &
1885                                       CNIC_DRV_STATE_HANDLES_IRQ))
1886                                 c_ops->cnic_handler(bp->cnic_data, NULL);
1887                         rcu_read_unlock();
1888
1889                         status &= ~mask;
1890                 }
1891         }
1892
1893         if (unlikely(status & 0x1)) {
1894
1895                 /* schedule sp task to perform default status block work, ack
1896                  * attentions and enable interrupts.
1897                  */
1898                 bnx2x_schedule_sp_task(bp);
1899
1900                 status &= ~0x1;
1901                 if (!status)
1902                         return IRQ_HANDLED;
1903         }
1904
1905         if (unlikely(status))
1906                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
1907                    status);
1908
1909         return IRQ_HANDLED;
1910 }
1911
1912 /* Link */
1913
1914 /*
1915  * General service functions
1916  */
1917
1918 int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1919 {
1920         u32 lock_status;
1921         u32 resource_bit = (1 << resource);
1922         int func = BP_FUNC(bp);
1923         u32 hw_lock_control_reg;
1924         int cnt;
1925
1926         /* Validating that the resource is within range */
1927         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1928                 BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1929                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1930                 return -EINVAL;
1931         }
1932
1933         if (func <= 5) {
1934                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1935         } else {
1936                 hw_lock_control_reg =
1937                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1938         }
1939
1940         /* Validating that the resource is not already taken */
1941         lock_status = REG_RD(bp, hw_lock_control_reg);
1942         if (lock_status & resource_bit) {
1943                 BNX2X_ERR("lock_status 0x%x  resource_bit 0x%x\n",
1944                    lock_status, resource_bit);
1945                 return -EEXIST;
1946         }
1947
1948         /* Try for 5 seconds, every 5ms */
1949         for (cnt = 0; cnt < 1000; cnt++) {
1950                 /* Try to acquire the lock */
1951                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1952                 lock_status = REG_RD(bp, hw_lock_control_reg);
1953                 if (lock_status & resource_bit)
1954                         return 0;
1955
1956                 usleep_range(5000, 10000);
1957         }
1958         BNX2X_ERR("Timeout\n");
1959         return -EAGAIN;
1960 }
1961
1962 int bnx2x_release_leader_lock(struct bnx2x *bp)
1963 {
1964         return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
1965 }
1966
1967 int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1968 {
1969         u32 lock_status;
1970         u32 resource_bit = (1 << resource);
1971         int func = BP_FUNC(bp);
1972         u32 hw_lock_control_reg;
1973
1974         /* Validating that the resource is within range */
1975         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1976                 BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1977                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1978                 return -EINVAL;
1979         }
1980
1981         if (func <= 5) {
1982                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1983         } else {
1984                 hw_lock_control_reg =
1985                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1986         }
1987
1988         /* Validating that the resource is currently taken */
1989         lock_status = REG_RD(bp, hw_lock_control_reg);
1990         if (!(lock_status & resource_bit)) {
1991                 BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. Unlock was called but lock wasn't taken!\n",
1992                           lock_status, resource_bit);
1993                 return -EFAULT;
1994         }
1995
1996         REG_WR(bp, hw_lock_control_reg, resource_bit);
1997         return 0;
1998 }
1999
2000 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
2001 {
2002         /* The GPIO should be swapped if swap register is set and active */
2003         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2004                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
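             /* e.g. if both NIG_REG_PORT_SWAP and NIG_REG_STRAP_OVERRIDE read
              * non-zero, gpio_port = 1 ^ port, so port 0 uses the port-1 GPIO
              * bank (gpio_shift below is then offset by
              * MISC_REGISTERS_GPIO_PORT_SHIFT).
              */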
2005         int gpio_shift = gpio_num +
2006                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2007         u32 gpio_mask = (1 << gpio_shift);
2008         u32 gpio_reg;
2009         int value;
2010
2011         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2012                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2013                 return -EINVAL;
2014         }
2015
2016         /* read GPIO value */
2017         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2018
2019         /* get the requested pin value */
2020         if ((gpio_reg & gpio_mask) == gpio_mask)
2021                 value = 1;
2022         else
2023                 value = 0;
2024
2025         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
2026
2027         return value;
2028 }
2029
2030 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2031 {
2032         /* The GPIO should be swapped if swap register is set and active */
2033         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2034                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2035         int gpio_shift = gpio_num +
2036                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2037         u32 gpio_mask = (1 << gpio_shift);
2038         u32 gpio_reg;
2039
2040         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2041                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2042                 return -EINVAL;
2043         }
2044
2045         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2046         /* read GPIO and mask all bits except the float bits */
2047         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
2048
2049         switch (mode) {
2050         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2051                 DP(NETIF_MSG_LINK,
2052                    "Set GPIO %d (shift %d) -> output low\n",
2053                    gpio_num, gpio_shift);
2054                 /* clear FLOAT and set CLR */
2055                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2056                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2057                 break;
2058
2059         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2060                 DP(NETIF_MSG_LINK,
2061                    "Set GPIO %d (shift %d) -> output high\n",
2062                    gpio_num, gpio_shift);
2063                 /* clear FLOAT and set SET */
2064                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2065                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2066                 break;
2067
2068         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2069                 DP(NETIF_MSG_LINK,
2070                    "Set GPIO %d (shift %d) -> input\n",
2071                    gpio_num, gpio_shift);
2072                 /* set FLOAT */
2073                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2074                 break;
2075
2076         default:
2077                 break;
2078         }
2079
2080         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2081         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2082
2083         return 0;
2084 }
2085
2086 int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode)
2087 {
2088         u32 gpio_reg = 0;
2089         int rc = 0;
2090
2091         /* Any port swapping should be handled by caller. */
2092
2093         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2094         /* read GPIO and clear the float/set/clr bits for the given pins */
2095         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2096         gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2097         gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
2098         gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
2099
2100         switch (mode) {
2101         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2102                 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins);
2103                 /* set CLR */
2104                 gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
2105                 break;
2106
2107         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2108                 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins);
2109                 /* set SET */
2110                 gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
2111                 break;
2112
2113         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2114                 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins);
2115                 /* set FLOAT */
2116                 gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2117                 break;
2118
2119         default:
2120                 BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode);
2121                 rc = -EINVAL;
2122                 break;
2123         }
2124
2125         if (rc == 0)
2126                 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2127
2128         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2129
2130         return rc;
2131 }
2132
2133 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2134 {
2135         /* The GPIO should be swapped if swap register is set and active */
2136         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2137                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2138         int gpio_shift = gpio_num +
2139                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2140         u32 gpio_mask = (1 << gpio_shift);
2141         u32 gpio_reg;
2142
2143         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2144                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2145                 return -EINVAL;
2146         }
2147
2148         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2149         /* read GPIO int */
2150         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2151
2152         switch (mode) {
2153         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2154                 DP(NETIF_MSG_LINK,
2155                    "Clear GPIO INT %d (shift %d) -> output low\n",
2156                    gpio_num, gpio_shift);
2157                 /* clear SET and set CLR */
2158                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2159                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2160                 break;
2161
2162         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2163                 DP(NETIF_MSG_LINK,
2164                    "Set GPIO INT %d (shift %d) -> output high\n",
2165                    gpio_num, gpio_shift);
2166                 /* clear CLR and set SET */
2167                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2168                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2169                 break;
2170
2171         default:
2172                 break;
2173         }
2174
2175         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2176         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2177
2178         return 0;
2179 }
2180
2181 static int bnx2x_set_spio(struct bnx2x *bp, int spio, u32 mode)
2182 {
2183         u32 spio_reg;
2184
2185         /* Only 2 SPIOs are configurable */
2186         if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
2187                 BNX2X_ERR("Invalid SPIO 0x%x\n", spio);
2188                 return -EINVAL;
2189         }
2190
2191         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2192         /* read SPIO and mask all bits except the float bits */
2193         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
2194
2195         switch (mode) {
2196         case MISC_SPIO_OUTPUT_LOW:
2197                 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output low\n", spio);
2198                 /* clear FLOAT and set CLR */
2199                 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
2200                 spio_reg |=  (spio << MISC_SPIO_CLR_POS);
2201                 break;
2202
2203         case MISC_SPIO_OUTPUT_HIGH:
2204                 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output high\n", spio);
2205                 /* clear FLOAT and set SET */
2206                 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
2207                 spio_reg |=  (spio << MISC_SPIO_SET_POS);
2208                 break;
2209
2210         case MISC_SPIO_INPUT_HI_Z:
2211                 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> input\n", spio);
2212                 /* set FLOAT */
2213                 spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
2214                 break;
2215
2216         default:
2217                 break;
2218         }
2219
2220         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2221         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2222
2223         return 0;
2224 }
2225
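     /* Translate the negotiated IEEE pause bits in link_vars into the ethtool
      * ADVERTISED_Pause/ADVERTISED_Asym_Pause flags for the current link
      * configuration index.
      */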
2226 void bnx2x_calc_fc_adv(struct bnx2x *bp)
2227 {
2228         u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
2229         switch (bp->link_vars.ieee_fc &
2230                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2231         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2232                 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
2233                                                    ADVERTISED_Pause);
2234                 break;
2235
2236         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2237                 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
2238                                                   ADVERTISED_Pause);
2239                 break;
2240
2241         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2242                 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
2243                 break;
2244
2245         default:
2246                 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
2247                                                    ADVERTISED_Pause);
2248                 break;
2249         }
2250 }
2251
2252 static void bnx2x_set_requested_fc(struct bnx2x *bp)
2253 {
2254         /* Initialize link parameters structure variables.
2255          * It is recommended to turn off RX FC for jumbo frames
2256          * for better performance.
2257          */
2258         if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000))
2259                 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2260         else
2261                 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2262 }
2263
2264 static void bnx2x_init_dropless_fc(struct bnx2x *bp)
2265 {
2266         u32 pause_enabled = 0;
2267
2268         if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) {
2269                 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2270                         pause_enabled = 1;
2271
2272                 REG_WR(bp, BAR_USTRORM_INTMEM +
2273                            USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)),
2274                        pause_enabled);
2275         }
2276
2277         DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n",
2278            pause_enabled ? "enabled" : "disabled");
2279 }
2280
2281 int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2282 {
2283         int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp);
2284         u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
2285
2286         if (!BP_NOMCP(bp)) {
2287                 bnx2x_set_requested_fc(bp);
2288                 bnx2x_acquire_phy_lock(bp);
2289
2290                 if (load_mode == LOAD_DIAG) {
2291                         struct link_params *lp = &bp->link_params;
2292                         lp->loopback_mode = LOOPBACK_XGXS;
2293                         /* do PHY loopback at 10G speed, if possible */
2294                         if (lp->req_line_speed[cfx_idx] < SPEED_10000) {
2295                                 if (lp->speed_cap_mask[cfx_idx] &
2296                                     PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
2297                                         lp->req_line_speed[cfx_idx] =
2298                                                 SPEED_10000;
2299                                 else
2300                                         lp->req_line_speed[cfx_idx] =
2301                                                 SPEED_1000;
2302                         }
2303                 }
2304
2305                 if (load_mode == LOAD_LOOPBACK_EXT) {
2306                         struct link_params *lp = &bp->link_params;
2307                         lp->loopback_mode = LOOPBACK_EXT;
2308                 }
2309
2310                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2311
2312                 bnx2x_release_phy_lock(bp);
2313
2314                 bnx2x_init_dropless_fc(bp);
2315
2316                 bnx2x_calc_fc_adv(bp);
2317
2318                 if (bp->link_vars.link_up) {
2319                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2320                         bnx2x_link_report(bp);
2321                 }
2322                 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2323                 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
2324                 return rc;
2325         }
2326         BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2327         return -EINVAL;
2328 }
2329
2330 void bnx2x_link_set(struct bnx2x *bp)
2331 {
2332         if (!BP_NOMCP(bp)) {
2333                 bnx2x_acquire_phy_lock(bp);
2334                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2335                 bnx2x_release_phy_lock(bp);
2336
2337                 bnx2x_init_dropless_fc(bp);
2338
2339                 bnx2x_calc_fc_adv(bp);
2340         } else
2341                 BNX2X_ERR("Bootcode is missing - can not set link\n");
2342 }
2343
2344 static void bnx2x__link_reset(struct bnx2x *bp)
2345 {
2346         if (!BP_NOMCP(bp)) {
2347                 bnx2x_acquire_phy_lock(bp);
2348                 bnx2x_lfa_reset(&bp->link_params, &bp->link_vars);
2349                 bnx2x_release_phy_lock(bp);
2350         } else
2351                 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2352 }
2353
2354 void bnx2x_force_link_reset(struct bnx2x *bp)
2355 {
2356         bnx2x_acquire_phy_lock(bp);
2357         bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2358         bnx2x_release_phy_lock(bp);
2359 }
2360
2361 u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
2362 {
2363         u8 rc = 0;
2364
2365         if (!BP_NOMCP(bp)) {
2366                 bnx2x_acquire_phy_lock(bp);
2367                 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
2368                                      is_serdes);
2369                 bnx2x_release_phy_lock(bp);
2370         } else
2371                 BNX2X_ERR("Bootcode is missing - can not test link\n");
2372
2373         return rc;
2374 }
2375
2376 /* Calculates the per-VN min rates.
2377    They are needed for further normalizing of the min_rates.
2378 
2379    If all the min_rates are 0 (or ETS is enabled), the fairness
2380    algorithm should be deactivated, and this function clears the
2381    fairness flag accordingly.
2382    If not all min_rates are zero, then those that are zero will be
2383    set to DEF_MIN_RATE (1).
2384  */
2385 static void bnx2x_calc_vn_min(struct bnx2x *bp,
2386                                       struct cmng_init_input *input)
2387 {
2388         int all_zero = 1;
2389         int vn;
2390
2391         for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2392                 u32 vn_cfg = bp->mf_config[vn];
2393                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2394                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2395
2396                 /* Skip hidden vns */
2397                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2398                         vn_min_rate = 0;
2399                 /* If min rate is zero - set it to 1 */
2400                 else if (!vn_min_rate)
2401                         vn_min_rate = DEF_MIN_RATE;
2402                 else
2403                         all_zero = 0;
2404
2405                 input->vnic_min_rate[vn] = vn_min_rate;
2406         }
2407
2408         /* if ETS or all min rates are zeros - disable fairness */
2409         if (BNX2X_IS_ETS_ENABLED(bp)) {
2410                 input->flags.cmng_enables &=
2411                                         ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2412                 DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
2413         } else if (all_zero) {
2414                 input->flags.cmng_enables &=
2415                                         ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2416                 DP(NETIF_MSG_IFUP,
2417            "All MIN values are zeroes - fairness will be disabled\n");
2418         } else
2419                 input->flags.cmng_enables |=
2420                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2421 }
2422
2423 static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
2424                                     struct cmng_init_input *input)
2425 {
2426         u16 vn_max_rate;
2427         u32 vn_cfg = bp->mf_config[vn];
2428
2429         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2430                 vn_max_rate = 0;
2431         else {
2432                 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
2433
2434                 if (IS_MF_SI(bp)) {
2435                         /* maxCfg is in percent of link speed */
2436                         vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
2437                 } else /* SD modes */
2438                         /* maxCfg is absolute in 100Mb units */
2439                         vn_max_rate = maxCfg * 100;
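                             /* e.g. maxCfg == 30 -> vn_max_rate = 3000 (Mbps) */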
2440         }
2441
2442         DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
2443
2444         input->vnic_max_rate[vn] = vn_max_rate;
2445 }
2446
2447 static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2448 {
2449         if (CHIP_REV_IS_SLOW(bp))
2450                 return CMNG_FNS_NONE;
2451         if (IS_MF(bp))
2452                 return CMNG_FNS_MINMAX;
2453
2454         return CMNG_FNS_NONE;
2455 }
2456
2457 void bnx2x_read_mf_cfg(struct bnx2x *bp)
2458 {
2459         int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
2460
2461         if (BP_NOMCP(bp))
2462                 return; /* what should the default value be in this case? */
2463
2464         /* For 2 port configuration the absolute function number formula
2465          * is:
2466          *      abs_func = 2 * vn + BP_PORT + BP_PATH
2467          *
2468          *      and there are 4 functions per port
2469          *
2470          * For 4 port configuration it is
2471          *      abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
2472          *
2473          *      and there are 2 functions per port
2474          */
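             /* e.g. vn == 2 on port 1, path 0: 2-port -> abs_func =
              * 2*2 + 1 + 0 = 5; 4-port -> abs_func = 4*2 + 2*1 + 0 = 10
              */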
2475         for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2476                 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2477
2478                 if (func >= E1H_FUNC_MAX)
2479                         break;
2480
2481                 bp->mf_config[vn] =
2482                         MF_CFG_RD(bp, func_mf_config[func].config);
2483         }
2484         if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
2485                 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
2486                 bp->flags |= MF_FUNC_DIS;
2487         } else {
2488                 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2489                 bp->flags &= ~MF_FUNC_DIS;
2490         }
2491 }
2492
2493 static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2494 {
2495         struct cmng_init_input input;
2496         memset(&input, 0, sizeof(struct cmng_init_input));
2497
2498         input.port_rate = bp->link_vars.line_speed;
2499
2500         if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) {
2501                 int vn;
2502
2503                 /* read mf conf from shmem */
2504                 if (read_cfg)
2505                         bnx2x_read_mf_cfg(bp);
2506
2507                 /* calculate vn_weight_sum and enable fairness if not 0 */
2508                 bnx2x_calc_vn_min(bp, &input);
2509
2510                 /* calculate and set min-max rate for each vn */
2511                 if (bp->port.pmf)
2512                         for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
2513                                 bnx2x_calc_vn_max(bp, vn, &input);
2514
2515                 /* always enable rate shaping and fairness */
2516                 input.flags.cmng_enables |=
2517                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2518
2519                 bnx2x_init_cmng(&input, &bp->cmng);
2520                 return;
2521         }
2522
2523         /* rate shaping and fairness are disabled */
2524         DP(NETIF_MSG_IFUP,
2525            "rate shaping and fairness are disabled\n");
2526 }
2527
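     /* Copy the per-port cmng configuration and the per-VN rate-shaping and
      * fairness variables into XSTORM internal memory.
      */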
2528 static void storm_memset_cmng(struct bnx2x *bp,
2529                               struct cmng_init *cmng,
2530                               u8 port)
2531 {
2532         int vn;
2533         size_t size = sizeof(struct cmng_struct_per_port);
2534
2535         u32 addr = BAR_XSTRORM_INTMEM +
2536                         XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
2537
2538         __storm_memset_struct(bp, addr, size, (u32 *)&cmng->port);
2539
2540         for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2541                 int func = func_by_vn(bp, vn);
2542
2543                 addr = BAR_XSTRORM_INTMEM +
2544                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func);
2545                 size = sizeof(struct rate_shaping_vars_per_vn);
2546                 __storm_memset_struct(bp, addr, size,
2547                                       (u32 *)&cmng->vnic.vnic_max_rate[vn]);
2548
2549                 addr = BAR_XSTRORM_INTMEM +
2550                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func);
2551                 size = sizeof(struct fairness_vars_per_vn);
2552                 __storm_memset_struct(bp, addr, size,
2553                                       (u32 *)&cmng->vnic.vnic_min_rate[vn]);
2554         }
2555 }
2556
2557 /* init cmng mode in HW according to local configuration */
2558 void bnx2x_set_local_cmng(struct bnx2x *bp)
2559 {
2560         int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
2561
2562         if (cmng_fns != CMNG_FNS_NONE) {
2563                 bnx2x_cmng_fns_init(bp, false, cmng_fns);
2564                 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2565         } else {
2566                 /* rate shaping and fairness are disabled */
2567                 DP(NETIF_MSG_IFUP,
2568                    "single function mode without fairness\n");
2569         }
2570 }
2571
2572 /* This function is called upon link interrupt */
2573 static void bnx2x_link_attn(struct bnx2x *bp)
2574 {
2575         /* Make sure that we are synced with the current statistics */
2576         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2577
2578         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2579
2580         bnx2x_init_dropless_fc(bp);
2581
2582         if (bp->link_vars.link_up) {
2583
2584                 if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
2585                         struct host_port_stats *pstats;
2586
2587                         pstats = bnx2x_sp(bp, port_stats);
2588                         /* reset old mac stats */
2589                         memset(&(pstats->mac_stx[0]), 0,
2590                                sizeof(struct mac_stx));
2591                 }
2592                 if (bp->state == BNX2X_STATE_OPEN)
2593                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2594         }
2595
2596         if (bp->link_vars.link_up && bp->link_vars.line_speed)
2597                 bnx2x_set_local_cmng(bp);
2598
2599         __bnx2x_link_report(bp);
2600
2601         if (IS_MF(bp))
2602                 bnx2x_link_sync_notify(bp);
2603 }
2604
2605 void bnx2x__link_status_update(struct bnx2x *bp)
2606 {
2607         if (bp->state != BNX2X_STATE_OPEN)
2608                 return;
2609
2610         /* read updated dcb configuration */
2611         if (IS_PF(bp)) {
2612                 bnx2x_dcbx_pmf_update(bp);
2613                 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2614                 if (bp->link_vars.link_up)
2615                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2616                 else
2617                         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2618                 /* indicate link status */
2619                 bnx2x_link_report(bp);
2620
2621         } else { /* VF */
2622                 bp->port.supported[0] |= (SUPPORTED_10baseT_Half |
2623                                           SUPPORTED_10baseT_Full |
2624                                           SUPPORTED_100baseT_Half |
2625                                           SUPPORTED_100baseT_Full |
2626                                           SUPPORTED_1000baseT_Full |
2627                                           SUPPORTED_2500baseX_Full |
2628                                           SUPPORTED_10000baseT_Full |
2629                                           SUPPORTED_TP |
2630                                           SUPPORTED_FIBRE |
2631                                           SUPPORTED_Autoneg |
2632                                           SUPPORTED_Pause |
2633                                           SUPPORTED_Asym_Pause);
2634                 bp->port.advertising[0] = bp->port.supported[0];
2635
2636                 bp->link_params.bp = bp;
2637                 bp->link_params.port = BP_PORT(bp);
2638                 bp->link_params.req_duplex[0] = DUPLEX_FULL;
2639                 bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE;
2640                 bp->link_params.req_line_speed[0] = SPEED_10000;
2641                 bp->link_params.speed_cap_mask[0] = 0x7f0000;
2642                 bp->link_params.switch_cfg = SWITCH_CFG_10G;
2643                 bp->link_vars.mac_type = MAC_TYPE_BMAC;
2644                 bp->link_vars.line_speed = SPEED_10000;
2645                 bp->link_vars.link_status =
2646                         (LINK_STATUS_LINK_UP |
2647                          LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
2648                 bp->link_vars.link_up = 1;
2649                 bp->link_vars.duplex = DUPLEX_FULL;
2650                 bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE;
2651                 __bnx2x_link_report(bp);
2652                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2653         }
2654 }
2655
2656 static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
2657                                   u16 vlan_val, u8 allowed_prio)
2658 {
2659         struct bnx2x_func_state_params func_params = {NULL};
2660         struct bnx2x_func_afex_update_params *f_update_params =
2661                 &func_params.params.afex_update;
2662
2663         func_params.f_obj = &bp->func_obj;
2664         func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE;
2665
2666         /* no need to wait for RAMROD completion, so don't
2667          * set RAMROD_COMP_WAIT flag
2668          */
2669
2670         f_update_params->vif_id = vifid;
2671         f_update_params->afex_default_vlan = vlan_val;
2672         f_update_params->allowed_priorities = allowed_prio;
2673
2674         /* if the ramrod cannot be sent, respond to MCP immediately */
2675         if (bnx2x_func_state_change(bp, &func_params) < 0)
2676                 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
2677
2678         return 0;
2679 }
2680
2681 static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
2682                                           u16 vif_index, u8 func_bit_map)
2683 {
2684         struct bnx2x_func_state_params func_params = {NULL};
2685         struct bnx2x_func_afex_viflists_params *update_params =
2686                 &func_params.params.afex_viflists;
2687         int rc;
2688         u32 drv_msg_code;
2689
2690         /* validate only LIST_SET and LIST_GET are received from switch */
2691         if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET))
2692                 BNX2X_ERR("BUG! afex_handle_vif_list_cmd invalid type 0x%x\n",
2693                           cmd_type);
2694
2695         func_params.f_obj = &bp->func_obj;
2696         func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS;
2697
2698         /* set parameters according to cmd_type */
2699         update_params->afex_vif_list_command = cmd_type;
2700         update_params->vif_list_index = vif_index;
2701         update_params->func_bit_map =
2702                 (cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
2703         update_params->func_to_clear = 0;
2704         drv_msg_code =
2705                 (cmd_type == VIF_LIST_RULE_GET) ?
2706                 DRV_MSG_CODE_AFEX_LISTGET_ACK :
2707                 DRV_MSG_CODE_AFEX_LISTSET_ACK;
2708
2709         /* if the ramrod cannot be sent, respond to MCP immediately for
2710          * SET and GET requests (others are not triggered from MCP)
2711          */
2712         rc = bnx2x_func_state_change(bp, &func_params);
2713         if (rc < 0)
2714                 bnx2x_fw_command(bp, drv_msg_code, 0);
2715
2716         return 0;
2717 }
2718
2719 static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd)
2720 {
2721         struct afex_stats afex_stats;
2722         u32 func = BP_ABS_FUNC(bp);
2723         u32 mf_config;
2724         u16 vlan_val;
2725         u32 vlan_prio;
2726         u16 vif_id;
2727         u8 allowed_prio;
2728         u8 vlan_mode;
2729         u32 addr_to_write, vifid, addrs, stats_type, i;
2730
2731         if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) {
2732                 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2733                 DP(BNX2X_MSG_MCP,
2734                    "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid);
2735                 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0);
2736         }
2737
2738         if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) {
2739                 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2740                 addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]);
2741                 DP(BNX2X_MSG_MCP,
2742                    "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n",
2743                    vifid, addrs);
2744                 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid,
2745                                                addrs);
2746         }
2747
2748         if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) {
2749                 addr_to_write = SHMEM2_RD(bp,
2750                         afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]);
2751                 stats_type = SHMEM2_RD(bp,
2752                         afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2753
2754                 DP(BNX2X_MSG_MCP,
2755                    "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n",
2756                    addr_to_write);
2757
2758                 bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type);
2759
2760                 /* write response to scratchpad, for MCP */
2761                 for (i = 0; i < (sizeof(struct afex_stats)/sizeof(u32)); i++)
2762                         REG_WR(bp, addr_to_write + i*sizeof(u32),
2763                                *(((u32 *)(&afex_stats))+i));
2764
2765                 /* send ack message to MCP */
2766                 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0);
2767         }
2768
2769         if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) {
2770                 mf_config = MF_CFG_RD(bp, func_mf_config[func].config);
2771                 bp->mf_config[BP_VN(bp)] = mf_config;
2772                 DP(BNX2X_MSG_MCP,
2773                    "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n",
2774                    mf_config);
2775
2776                 /* if VIF_SET is "enabled" */
2777                 if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) {
2778                         /* set rate limit directly to internal RAM */
2779                         struct cmng_init_input cmng_input;
2780                         struct rate_shaping_vars_per_vn m_rs_vn;
2781                         size_t size = sizeof(struct rate_shaping_vars_per_vn);
2782                         u32 addr = BAR_XSTRORM_INTMEM +
2783                             XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp));
2784
2785                         bp->mf_config[BP_VN(bp)] = mf_config;
2786
2787                         bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input);
2788                         m_rs_vn.vn_counter.rate =
2789                                 cmng_input.vnic_max_rate[BP_VN(bp)];
2790                         m_rs_vn.vn_counter.quota =
2791                                 (m_rs_vn.vn_counter.rate *
2792                                  RS_PERIODIC_TIMEOUT_USEC) / 8;
2793
2794                         __storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn);
2795
2796                         /* read relevant values from mf_cfg struct in shmem */
2797                         vif_id =
2798                                 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2799                                  FUNC_MF_CFG_E1HOV_TAG_MASK) >>
2800                                 FUNC_MF_CFG_E1HOV_TAG_SHIFT;
2801                         vlan_val =
2802                                 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2803                                  FUNC_MF_CFG_AFEX_VLAN_MASK) >>
2804                                 FUNC_MF_CFG_AFEX_VLAN_SHIFT;
2805                         vlan_prio = (mf_config &
2806                                      FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
2807                                     FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT;
2808                         vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT);
2809                         vlan_mode =
2810                                 (MF_CFG_RD(bp,
2811                                            func_mf_config[func].afex_config) &
2812                                  FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
2813                                 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT;
2814                         allowed_prio =
2815                                 (MF_CFG_RD(bp,
2816                                            func_mf_config[func].afex_config) &
2817                                  FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
2818                                 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT;
2819
2820                         /* send ramrod to FW, return in case of failure */
2821                         if (bnx2x_afex_func_update(bp, vif_id, vlan_val,
2822                                                    allowed_prio))
2823                                 return;
2824
2825                         bp->afex_def_vlan_tag = vlan_val;
2826                         bp->afex_vlan_mode = vlan_mode;
2827                 } else {
2828                         /* notify link down because the function is disabled */
2829                         bnx2x_link_report(bp);
2830
2831                         /* send INVALID VIF ramrod to FW */
2832                         bnx2x_afex_func_update(bp, 0xFFFF, 0, 0);
2833
2834                         /* Reset the default afex VLAN */
2835                         bp->afex_def_vlan_tag = -1;
2836                 }
2837         }
2838 }
2839
2840 static void bnx2x_pmf_update(struct bnx2x *bp)
2841 {
2842         int port = BP_PORT(bp);
2843         u32 val;
2844
2845         bp->port.pmf = 1;
2846         DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf);
2847
2848         /*
2849          * We need the smp_mb() to ensure the ordering between writing to
2850          * bp->port.pmf here and reading it from bnx2x_periodic_task().
2851          */
2852         smp_mb();
2853
2854         /* queue a periodic task */
2855         queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2856
2857         bnx2x_dcbx_pmf_update(bp);
2858
2859         /* enable nig attention */
2860         val = (0xff0f | (1 << (BP_VN(bp) + 4)));
2861         if (bp->common.int_block == INT_BLOCK_HC) {
2862                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2863                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2864         } else if (!CHIP_IS_E1x(bp)) {
2865                 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2866                 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2867         }
2868
2869         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2870 }
2871
2872 /* end of Link */
2873
2874 /* slow path */
2875
2876 /*
2877  * General service functions
2878  */
2879
2880 /* send the MCP a request, block until there is a reply */
2881 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
2882 {
2883         int mb_idx = BP_FW_MB_IDX(bp);
2884         u32 seq;
2885         u32 rc = 0;
2886         u32 cnt = 1;
2887         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2888
2889         mutex_lock(&bp->fw_mb_mutex);
2890         seq = ++bp->fw_seq;
2891         SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
2892         SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
2893
2894         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n",
2895                         (command | seq), param);
2896
2897         do {
2898                 /* let the FW do its magic ... */
2899                 msleep(delay);
2900
2901                 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
2902
2903                 /* Give the FW up to 5 seconds (500*10ms) */
2904         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2905
2906         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2907            cnt*delay, rc, seq);
2908
2909         /* is this a reply to our command? */
2910         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2911                 rc &= FW_MSG_CODE_MASK;
2912         else {
2913                 /* FW BUG! */
2914                 BNX2X_ERR("FW failed to respond!\n");
2915                 bnx2x_fw_dump(bp);
2916                 rc = 0;
2917         }
2918         mutex_unlock(&bp->fw_mb_mutex);
2919
2920         return rc;
2921 }
2922
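/* Illustrative sketch (not part of the original file): a typical caller of
 * bnx2x_fw_command() issues a request and dispatches on the masked reply;
 * a reply of 0 means the MCP never responded. The LOAD_REQ/LOAD_COMMON
 * pair below is used only as an assumed example of a request/response.
 */
#if 0   /* example only */
static int example_mcp_handshake(struct bnx2x *bp)
{
        u32 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);

        if (!load_code) {
                BNX2X_ERR("MCP response failure\n");
                return -EBUSY;
        }
        if (load_code == FW_MSG_CODE_DRV_LOAD_COMMON) {
                /* first driver on the chip - perform common init */
        }
        return 0;
}
#endif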
2923 static void storm_memset_func_cfg(struct bnx2x *bp,
2924                                  struct tstorm_eth_function_common_config *tcfg,
2925                                  u16 abs_fid)
2926 {
2927         size_t size = sizeof(struct tstorm_eth_function_common_config);
2928
2929         u32 addr = BAR_TSTRORM_INTMEM +
2930                         TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
2931
2932         __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
2933 }
2934
2935 void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2936 {
2937         if (CHIP_IS_E1x(bp)) {
2938                 struct tstorm_eth_function_common_config tcfg = {0};
2939
2940                 storm_memset_func_cfg(bp, &tcfg, p->func_id);
2941         }
2942
2943         /* Enable the function in the FW */
2944         storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
2945         storm_memset_func_en(bp, p->func_id, 1);
2946
2947         /* spq */
2948         if (p->func_flgs & FUNC_FLG_SPQ) {
2949                 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
2950                 REG_WR(bp, XSEM_REG_FAST_MEMORY +
2951                        XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
2952         }
2953 }
2954
2955 /**
2956  * bnx2x_get_common_flags - Return common flags
2957  *
2958  * @bp:         device handle
2959  * @fp:         queue handle
2960  * @zero_stats: TRUE if statistics zeroing is needed
2961  *
2962  * Return the flags that are common to both Tx-only and regular connections.
2963  */
2964 static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
2965                                             struct bnx2x_fastpath *fp,
2966                                             bool zero_stats)
2967 {
2968         unsigned long flags = 0;
2969
2970         /* PF driver will always initialize the Queue to an ACTIVE state */
2971         __set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
2972
2973         /* tx only connections collect statistics (on the same index as the
2974          * parent connection). The statistics are zeroed when the parent
2975          * connection is initialized.
2976          */
2977
2978         __set_bit(BNX2X_Q_FLG_STATS, &flags);
2979         if (zero_stats)
2980                 __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
2981
2982         __set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags);
2983         __set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags);
2984
2985 #ifdef BNX2X_STOP_ON_ERROR
2986         __set_bit(BNX2X_Q_FLG_TX_SEC, &flags);
2987 #endif
2988
2989         return flags;
2990 }
2991
2992 static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
2993                                        struct bnx2x_fastpath *fp,
2994                                        bool leading)
2995 {
2996         unsigned long flags = 0;
2997
2998         /* calculate other queue flags */
2999         if (IS_MF_SD(bp))
3000                 __set_bit(BNX2X_Q_FLG_OV, &flags);
3001
3002         if (IS_FCOE_FP(fp)) {
3003                 __set_bit(BNX2X_Q_FLG_FCOE, &flags);
3004                 /* For FCoE - force usage of default priority (for afex) */
3005                 __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
3006         }
3007
3008         if (!fp->disable_tpa) {
3009                 __set_bit(BNX2X_Q_FLG_TPA, &flags);
3010                 __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
3011                 if (fp->mode == TPA_MODE_GRO)
3012                         __set_bit(BNX2X_Q_FLG_TPA_GRO, &flags);
3013         }
3014
3015         if (leading) {
3016                 __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags);
3017                 __set_bit(BNX2X_Q_FLG_MCAST, &flags);
3018         }
3019
3020         /* Always set HW VLAN stripping */
3021         __set_bit(BNX2X_Q_FLG_VLAN, &flags);
3022
3023         /* configure silent vlan removal */
3024         if (IS_MF_AFEX(bp))
3025                 __set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags);
3026
3027         return flags | bnx2x_get_common_flags(bp, fp, true);
3028 }
3029
3030 static void bnx2x_pf_q_prep_general(struct bnx2x *bp,
3031         struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init,
3032         u8 cos)
3033 {
3034         gen_init->stat_id = bnx2x_stats_id(fp);
3035         gen_init->spcl_id = fp->cl_id;
3036
3037         /* Always use mini-jumbo MTU for FCoE L2 ring */
3038         if (IS_FCOE_FP(fp))
3039                 gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
3040         else
3041                 gen_init->mtu = bp->dev->mtu;
3042
3043         gen_init->cos = cos;
3044 }
3045
3046 static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
3047         struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
3048         struct bnx2x_rxq_setup_params *rxq_init)
3049 {
3050         u8 max_sge = 0;
3051         u16 sge_sz = 0;
3052         u16 tpa_agg_size = 0;
3053
3054         if (!fp->disable_tpa) {
3055                 pause->sge_th_lo = SGE_TH_LO(bp);
3056                 pause->sge_th_hi = SGE_TH_HI(bp);
3057
3058                 /* validate the SGE ring has enough entries to cross the high threshold */
3059                 WARN_ON(bp->dropless_fc &&
3060                                 pause->sge_th_hi + FW_PREFETCH_CNT >
3061                                 MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
3062
3063                 tpa_agg_size = TPA_AGG_SIZE;
3064                 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
3065                         SGE_PAGE_SHIFT;
3066                 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
3067                           (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
3068                 sge_sz = (u16)min_t(u32, SGE_PAGES, 0xffff);
3069         }
3070
3071         /* pause - not for e1 */
3072         if (!CHIP_IS_E1(bp)) {
3073                 pause->bd_th_lo = BD_TH_LO(bp);
3074                 pause->bd_th_hi = BD_TH_HI(bp);
3075
3076                 pause->rcq_th_lo = RCQ_TH_LO(bp);
3077                 pause->rcq_th_hi = RCQ_TH_HI(bp);
3078                 /*
3079                  * validate that rings have enough entries to cross
3080                  * high thresholds
3081                  */
3082                 WARN_ON(bp->dropless_fc &&
3083                                 pause->bd_th_hi + FW_PREFETCH_CNT >
3084                                 bp->rx_ring_size);
3085                 WARN_ON(bp->dropless_fc &&
3086                                 pause->rcq_th_hi + FW_PREFETCH_CNT >
3087                                 NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);
3088
3089                 pause->pri_map = 1;
3090         }
3091
3092         /* rxq setup */
3093         rxq_init->dscr_map = fp->rx_desc_mapping;
3094         rxq_init->sge_map = fp->rx_sge_mapping;
3095         rxq_init->rcq_map = fp->rx_comp_mapping;
3096         rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
3097
3098         /* This is the maximum number of data bytes that may be
3099          * placed on the BD (not including padding).
3100          */
3101         rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START -
3102                            BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;
3103
3104         rxq_init->cl_qzone_id = fp->cl_qzone_id;
3105         rxq_init->tpa_agg_sz = tpa_agg_size;
3106         rxq_init->sge_buf_sz = sge_sz;
3107         rxq_init->max_sges_pkt = max_sge;
3108         rxq_init->rss_engine_id = BP_FUNC(bp);
3109         rxq_init->mcast_engine_id = BP_FUNC(bp);
3110
3111         /* Maximum number of simultaneous TPA aggregations for this queue.
3112          *
3113          * For PF clients it should be the maximum available number.
3114          * VF driver(s) may want to set it to a smaller value.
3115          */
3116         rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
3117
3118         rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
3119         rxq_init->fw_sb_id = fp->fw_sb_id;
3120
3121         if (IS_FCOE_FP(fp))
3122                 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
3123         else
3124                 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
3125         /* configure silent vlan removal
3126          * if multi function mode is afex, then mask default vlan
3127          */
3128         if (IS_MF_AFEX(bp)) {
3129                 rxq_init->silent_removal_value = bp->afex_def_vlan_tag;
3130                 rxq_init->silent_removal_mask = VLAN_VID_MASK;
3131         }
3132 }
3133
3134 static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
3135         struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
3136         u8 cos)
3137 {
3138         txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping;
3139         txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
3140         txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
3141         txq_init->fw_sb_id = fp->fw_sb_id;
3142
3143         /*
3144          * set the tss leading client id for Tx classification to the
3145          * leading RSS client id
3146          */
3147         txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);
3148
3149         if (IS_FCOE_FP(fp)) {
3150                 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
3151                 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
3152         }
3153 }
3154
3155 static void bnx2x_pf_init(struct bnx2x *bp)
3156 {
3157         struct bnx2x_func_init_params func_init = {0};
3158         struct event_ring_data eq_data = { {0} };
3159         u16 flags;
3160
3161         if (!CHIP_IS_E1x(bp)) {
3162                 /* reset IGU PF statistics: MSIX + ATTN */
3163                 /* PF */
3164                 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
3165                            BNX2X_IGU_STAS_MSG_VF_CNT*4 +
3166                            (CHIP_MODE_IS_4_PORT(bp) ?
3167                                 BP_FUNC(bp) : BP_VN(bp))*4, 0);
3168                 /* ATTN */
3169                 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
3170                            BNX2X_IGU_STAS_MSG_VF_CNT*4 +
3171                            BNX2X_IGU_STAS_MSG_PF_CNT*4 +
3172                            (CHIP_MODE_IS_4_PORT(bp) ?
3173                                 BP_FUNC(bp) : BP_VN(bp))*4, 0);
3174         }
3175
3176         /* function setup flags */
3177         flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
3178
3179         /* This flag is relevant for E1x only.
3180          * E2 doesn't have a TPA configuration at the function level.
3181          */
3182         flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
3183
3184         func_init.func_flgs = flags;
3185         func_init.pf_id = BP_FUNC(bp);
3186         func_init.func_id = BP_FUNC(bp);
3187         func_init.spq_map = bp->spq_mapping;
3188         func_init.spq_prod = bp->spq_prod_idx;
3189
3190         bnx2x_func_init(bp, &func_init);
3191
3192         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
3193
3194         /*
3195          * Congestion management values depend on the link rate.
3196          * There is no active link yet, so the initial link rate is set to
3197          * 10 Gbps. When the link comes up, the congestion management values
3198          * are re-calculated according to the actual link rate.
3199          */
3200         bp->link_vars.line_speed = SPEED_10000;
3201         bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
3202
3203         /* Only the PMF sets the HW */
3204         if (bp->port.pmf)
3205                 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3206
3207         /* init Event Queue - PCI bus guarantees correct endianness */
3208         eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
3209         eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
3210         eq_data.producer = bp->eq_prod;
3211         eq_data.index_id = HC_SP_INDEX_EQ_CONS;
3212         eq_data.sb_id = DEF_SB_ID;
3213         storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
3214 }
3215
3216 static void bnx2x_e1h_disable(struct bnx2x *bp)
3217 {
3218         int port = BP_PORT(bp);
3219
3220         bnx2x_tx_disable(bp);
3221
3222         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
3223 }
3224
3225 static void bnx2x_e1h_enable(struct bnx2x *bp)
3226 {
3227         int port = BP_PORT(bp);
3228
3229         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
3230
3231         /* Tx queues should only be re-enabled */
3232         netif_tx_wake_all_queues(bp->dev);
3233
3234         /*
3235          * Do not call netif_carrier_on() here, since the link state check
3236          * will call it anyway if the link is up
3237          */
3238 }
3239
3240 #define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
3241
3242 static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
3243 {
3244         struct eth_stats_info *ether_stat =
3245                 &bp->slowpath->drv_info_to_mcp.ether_stat;
3246         struct bnx2x_vlan_mac_obj *mac_obj =
3247                 &bp->sp_objs->mac_obj;
3248         int i;
3249
3250         strlcpy(ether_stat->version, DRV_MODULE_VERSION,
3251                 ETH_STAT_INFO_VERSION_LEN);
3252
3253         /* get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the
3254          * mac_local field of the ether_stat struct. The base address is offset
3255          * by 2 bytes to account for the field being 8 bytes while a mac
3256          * address is only 6 bytes. Likewise, the stride for the get_n_elements
3257          * function is 2 bytes, bridging the gap between the 6 bytes of a mac
3258          * and the 8 bytes allocated by the ether_stat struct, so the macs
3259          * land in their proper positions.
3260          */
3261         for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++)
3262                 memset(ether_stat->mac_local + i, 0,
3263                        sizeof(ether_stat->mac_local[0]));
3264         mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj,
3265                                 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
3266                                 ether_stat->mac_local + MAC_PAD, MAC_PAD,
3267                                 ETH_ALEN);
3268         ether_stat->mtu_size = bp->dev->mtu;
3269         if (bp->dev->features & NETIF_F_RXCSUM)
3270                 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
3271         if (bp->dev->features & NETIF_F_TSO)
3272                 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
3273         ether_stat->feature_flags |= bp->common.boot_mode;
3274
3275         ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0;
3276
3277         ether_stat->txq_size = bp->tx_ring_size;
3278         ether_stat->rxq_size = bp->rx_ring_size;
3279 }
3280
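/* Illustrative sketch (not part of the original file): how a 6-byte MAC
 * lands in one 8-byte mac_local[] slot at offset MAC_PAD, per the layout
 * comment in bnx2x_drv_info_ether_stat() above. example_fill_mac_slot()
 * is a hypothetical helper, not a driver function.
 */
#if 0   /* example only */
static void example_fill_mac_slot(u8 *slot8, const u8 *mac6)
{
        memset(slot8, 0, 8);                       /* clear the 8-byte slot */
        memcpy(slot8 + MAC_PAD, mac6, ETH_ALEN);   /* MAC in bytes 2..7 */
}
#endif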
3281 static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
3282 {
3283         struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3284         struct fcoe_stats_info *fcoe_stat =
3285                 &bp->slowpath->drv_info_to_mcp.fcoe_stat;
3286
3287         if (!CNIC_LOADED(bp))
3288                 return;
3289
3290         memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN);
3291
3292         fcoe_stat->qos_priority =
3293                 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
3294
3295         /* insert FCoE stats from ramrod response */
3296         if (!NO_FCOE(bp)) {
3297                 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
3298                         &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3299                         tstorm_queue_statistics;
3300
3301                 struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
3302                         &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3303                         xstorm_queue_statistics;
3304
3305                 struct fcoe_statistics_params *fw_fcoe_stat =
3306                         &bp->fw_stats_data->fcoe;
3307
3308                 ADD_64_LE(fcoe_stat->rx_bytes_hi, LE32_0,
3309                           fcoe_stat->rx_bytes_lo,
3310                           fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
3311
3312                 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3313                           fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
3314                           fcoe_stat->rx_bytes_lo,
3315                           fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
3316
3317                 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3318                           fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
3319                           fcoe_stat->rx_bytes_lo,
3320                           fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
3321
3322                 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3323                           fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
3324                           fcoe_stat->rx_bytes_lo,
3325                           fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
3326
3327                 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3328                           fcoe_stat->rx_frames_lo,
3329                           fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
3330
3331                 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3332                           fcoe_stat->rx_frames_lo,
3333                           fcoe_q_tstorm_stats->rcv_ucast_pkts);
3334
3335                 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3336                           fcoe_stat->rx_frames_lo,
3337                           fcoe_q_tstorm_stats->rcv_bcast_pkts);
3338
3339                 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3340                           fcoe_stat->rx_frames_lo,
3341                           fcoe_q_tstorm_stats->rcv_mcast_pkts);
3342
3343                 ADD_64_LE(fcoe_stat->tx_bytes_hi, LE32_0,
3344                           fcoe_stat->tx_bytes_lo,
3345                           fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
3346
3347                 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3348                           fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
3349                           fcoe_stat->tx_bytes_lo,
3350                           fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
3351
3352                 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3353                           fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
3354                           fcoe_stat->tx_bytes_lo,
3355                           fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
3356
3357                 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3358                           fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
3359                           fcoe_stat->tx_bytes_lo,
3360                           fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
3361
3362                 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3363                           fcoe_stat->tx_frames_lo,
3364                           fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
3365
3366                 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3367                           fcoe_stat->tx_frames_lo,
3368                           fcoe_q_xstorm_stats->ucast_pkts_sent);
3369
3370                 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3371                           fcoe_stat->tx_frames_lo,
3372                           fcoe_q_xstorm_stats->bcast_pkts_sent);
3373
3374                 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3375                           fcoe_stat->tx_frames_lo,
3376                           fcoe_q_xstorm_stats->mcast_pkts_sent);
3377         }
3378
3379         /* ask L5 driver to add data to the struct */
3380         bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
3381 }
3382
3383 static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
3384 {
3385         struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3386         struct iscsi_stats_info *iscsi_stat =
3387                 &bp->slowpath->drv_info_to_mcp.iscsi_stat;
3388
3389         if (!CNIC_LOADED(bp))
3390                 return;
3391
3392         memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac,
3393                ETH_ALEN);
3394
3395         iscsi_stat->qos_priority =
3396                 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
3397
3398         /* ask L5 driver to add data to the struct */
3399         bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
3400 }
3401
3402 /* called due to MCP event (on pmf):
3403  *      reread new bandwidth configuration
3404  *      configure FW
3405  *      notify other functions about the change
3406  */
3407 static void bnx2x_config_mf_bw(struct bnx2x *bp)
3408 {
3409         if (bp->link_vars.link_up) {
3410                 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
3411                 bnx2x_link_sync_notify(bp);
3412         }
3413         storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3414 }
3415
3416 static void bnx2x_set_mf_bw(struct bnx2x *bp)
3417 {
3418         bnx2x_config_mf_bw(bp);
3419         bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
3420 }
3421
3422 static void bnx2x_handle_eee_event(struct bnx2x *bp)
3423 {
3424         DP(BNX2X_MSG_MCP, "EEE - LLDP event\n");
3425         bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
3426 }
3427
3428 static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
3429 {
3430         enum drv_info_opcode op_code;
3431         u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
3432
3433         /* if the drv_info version supported by the MFW doesn't match - send NACK */
3434         if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
3435                 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3436                 return;
3437         }
3438
3439         op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
3440                   DRV_INFO_CONTROL_OP_CODE_SHIFT;
3441
3442         memset(&bp->slowpath->drv_info_to_mcp, 0,
3443                sizeof(union drv_info_to_mcp));
3444
3445         switch (op_code) {
3446         case ETH_STATS_OPCODE:
3447                 bnx2x_drv_info_ether_stat(bp);
3448                 break;
3449         case FCOE_STATS_OPCODE:
3450                 bnx2x_drv_info_fcoe_stat(bp);
3451                 break;
3452         case ISCSI_STATS_OPCODE:
3453                 bnx2x_drv_info_iscsi_stat(bp);
3454                 break;
3455         default:
3456                 /* if op code isn't supported - send NACK */
3457                 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3458                 return;
3459         }
3460
3461         /* if we got a drv_info attention from the MFW, then these fields
3462          * are guaranteed to be defined in shmem2
3463          */
3464         SHMEM2_WR(bp, drv_info_host_addr_lo,
3465                 U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3466         SHMEM2_WR(bp, drv_info_host_addr_hi,
3467                 U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3468
3469         bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
3470 }
3471
3472 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
3473 {
3474         DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
3475
3476         if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
3477
3478                 /*
3479                  * This is the only place besides the function initialization
3480                  * where bp->flags can change, so it is done without any
3481                  * locks
3482                  */
3483                 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
3484                         DP(BNX2X_MSG_MCP, "mf_cfg function disabled\n");
3485                         bp->flags |= MF_FUNC_DIS;
3486
3487                         bnx2x_e1h_disable(bp);
3488                 } else {
3489                         DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n");
3490                         bp->flags &= ~MF_FUNC_DIS;
3491
3492                         bnx2x_e1h_enable(bp);
3493                 }
3494                 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
3495         }
3496         if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
3497                 bnx2x_config_mf_bw(bp);
3498                 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
3499         }
3500
3501         /* Report results to MCP */
3502         if (dcc_event)
3503                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
3504         else
3505                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
3506 }
3507
3508 /* must be called under the spq lock */
3509 static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
3510 {
3511         struct eth_spe *next_spe = bp->spq_prod_bd;
3512
3513         if (bp->spq_prod_bd == bp->spq_last_bd) {
3514                 bp->spq_prod_bd = bp->spq;
3515                 bp->spq_prod_idx = 0;
3516                 DP(BNX2X_MSG_SP, "end of spq\n");
3517         } else {
3518                 bp->spq_prod_bd++;
3519                 bp->spq_prod_idx++;
3520         }
3521         return next_spe;
3522 }
3523
3524 /* must be called under the spq lock */
3525 static void bnx2x_sp_prod_update(struct bnx2x *bp)
3526 {
3527         int func = BP_FUNC(bp);
3528
3529         /*
3530          * Make sure that BD data is updated before writing the producer:
3531          * BD data is written to the memory, the producer is read from the
3532          * memory, thus we need a full memory barrier to ensure the ordering.
3533          */
3534         mb();
3535
3536         REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
3537                  bp->spq_prod_idx);
3538         mmiowb();
3539 }
3540
3541 /**
3542  * bnx2x_is_contextless_ramrod - check if the current command ends on EQ
3543  *
3544  * @cmd:        command to check
3545  * @cmd_type:   command type
3546  */
3547 static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
3548 {
3549         if ((cmd_type == NONE_CONNECTION_TYPE) ||
3550             (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
3551             (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
3552             (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
3553             (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
3554             (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
3555             (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE))
3556                 return true;
3557         else
3558                 return false;
3559 }
3560
3561 /**
3562  * bnx2x_sp_post - place a single command on an SP ring
3563  *
3564  * @bp:         driver handle
3565  * @command:    command to place (e.g. SETUP, FILTER_RULES, etc.)
3566  * @cid:        SW CID the command is related to
3567  * @data_hi:    command private data address (high 32 bits)
3568  * @data_lo:    command private data address (low 32 bits)
3569  * @cmd_type:   command type (e.g. NONE, ETH)
3570  *
3571  * SP data is handled as if it's always an address pair, thus data fields are
3572  * not swapped to little endian in upper functions. Instead this function swaps
3573  * data as if it's two u32 fields.
3574  */
3575 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
3576                   u32 data_hi, u32 data_lo, int cmd_type)
3577 {
3578         struct eth_spe *spe;
3579         u16 type;
3580         bool common = bnx2x_is_contextless_ramrod(command, cmd_type);
3581
3582 #ifdef BNX2X_STOP_ON_ERROR
3583         if (unlikely(bp->panic)) {
3584                 BNX2X_ERR("Can't post SP when there is a panic\n");
3585                 return -EIO;
3586         }
3587 #endif
3588
3589         spin_lock_bh(&bp->spq_lock);
3590
3591         if (common) {
3592                 if (!atomic_read(&bp->eq_spq_left)) {
3593                         BNX2X_ERR("BUG! EQ ring full!\n");
3594                         spin_unlock_bh(&bp->spq_lock);
3595                         bnx2x_panic();
3596                         return -EBUSY;
3597                 }
3598         } else if (!atomic_read(&bp->cq_spq_left)) {
3599                 BNX2X_ERR("BUG! SPQ ring full!\n");
3600                 spin_unlock_bh(&bp->spq_lock);
3601                 bnx2x_panic();
3602                 return -EBUSY;
3603         }
3604
3605         spe = bnx2x_sp_get_next(bp);
3606
3607         /* CID needs the port number to be encoded in it */
3608         spe->hdr.conn_and_cmd_data =
3609                         cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
3610                                     HW_CID(bp, cid));
3611
3612         type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
3613
3614         type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
3615                  SPE_HDR_FUNCTION_ID);
3616
3617         spe->hdr.type = cpu_to_le16(type);
3618
3619         spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
3620         spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
3621
3622         /*
3623          * It's ok if the actual decrement reaches memory anywhere between
3624          * the spin_lock and spin_unlock. Thus no further explicit memory
3625          * barrier is needed.
3626          */
3627         if (common)
3628                 atomic_dec(&bp->eq_spq_left);
3629         else
3630                 atomic_dec(&bp->cq_spq_left);
3631
3632         DP(BNX2X_MSG_SP,
3633            "SPQE[%x] (%x:%x)  (cmd, common?) (%d,%d)  hw_cid %x  data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n",
3634            bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
3635            (u32)(U64_LO(bp->spq_mapping) +
3636            (void *)bp->spq_prod_bd - (void *)bp->spq), command, common,
3637            HW_CID(bp, cid), data_hi, data_lo, type,
3638            atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
3639
3640         bnx2x_sp_prod_update(bp);
3641         spin_unlock_bh(&bp->spq_lock);
3642         return 0;
3643 }
3644
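/* Illustrative sketch (not part of the original file): posting a ramrod
 * whose private data lives in DMA-able memory. The address is split with
 * U64_HI()/U64_LO(), matching the swapping convention described in the
 * kernel-doc above; the command id and cid here are assumed placeholders.
 */
#if 0   /* example only */
static int example_post_ramrod(struct bnx2x *bp, int cid, dma_addr_t mapping)
{
        /* ETH_CONNECTION_TYPE: completion is delivered on the connection CQ */
        return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, cid,
                             U64_HI(mapping), U64_LO(mapping),
                             ETH_CONNECTION_TYPE);
}
#endif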
3645 /* acquire split MCP access lock register */
3646 static int bnx2x_acquire_alr(struct bnx2x *bp)
3647 {
3648         u32 j, val;
3649         int rc = 0;
3650
3651         might_sleep();
3652         for (j = 0; j < 1000; j++) {
3653                 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, MCPR_ACCESS_LOCK_LOCK);
3654                 val = REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK);
3655                 if (val & MCPR_ACCESS_LOCK_LOCK)
3656                         break;
3657
3658                 usleep_range(5000, 10000);
3659         }
3660         if (!(val & MCPR_ACCESS_LOCK_LOCK)) {
3661                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
3662                 rc = -EBUSY;
3663         }
3664
3665         return rc;
3666 }
3667
3668 /* release split MCP access lock register */
3669 static void bnx2x_release_alr(struct bnx2x *bp)
3670 {
3671         REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0);
3672 }
3673
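/* Illustrative sketch (not part of the original file): the acquire/release
 * pair above brackets accesses to resources shared with the MCP. The body
 * of the critical section here is a hypothetical placeholder.
 */
#if 0   /* example only */
static int example_with_alr(struct bnx2x *bp)
{
        int rc = bnx2x_acquire_alr(bp);

        if (rc)
                return rc;      /* lock not granted within the polling window */
        /* ... touch MCP-shared resources here ... */
        bnx2x_release_alr(bp);
        return 0;
}
#endif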
3674 #define BNX2X_DEF_SB_ATT_IDX    0x0001
3675 #define BNX2X_DEF_SB_IDX        0x0002
3676
3677 static u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
3678 {
3679         struct host_sp_status_block *def_sb = bp->def_status_blk;
3680         u16 rc = 0;
3681
3682         barrier(); /* status block is written to by the chip */
3683         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
3684                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
3685                 rc |= BNX2X_DEF_SB_ATT_IDX;
3686         }
3687
3688         if (bp->def_idx != def_sb->sp_sb.running_index) {
3689                 bp->def_idx = def_sb->sp_sb.running_index;
3690                 rc |= BNX2X_DEF_SB_IDX;
3691         }
3692
3693         /* Do not reorder: reading the indices must complete before handling */
3694         barrier();
3695         return rc;
3696 }
3697
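/* Illustrative sketch (not part of the original file): a consumer of the
 * bits returned by bnx2x_update_dsb_idx() - attention handling is assumed
 * to run when BNX2X_DEF_SB_ATT_IDX is set, slow-path event handling when
 * BNX2X_DEF_SB_IDX is set.
 */
#if 0   /* example only */
static void example_consume_dsb_bits(struct bnx2x *bp)
{
        u16 status = bnx2x_update_dsb_idx(bp);

        if (status & BNX2X_DEF_SB_ATT_IDX) {
                /* attention index advanced - process attentions */
        }
        if (status & BNX2X_DEF_SB_IDX) {
                /* slow-path index advanced - process the event queue */
        }
}
#endif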
3698 /*
3699  * slow path service functions
3700  */
3701
3702 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
3703 {
3704         int port = BP_PORT(bp);
3705         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3706                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
3707         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
3708                                        NIG_REG_MASK_INTERRUPT_PORT0;
3709         u32 aeu_mask;
3710         u32 nig_mask = 0;
3711         u32 reg_addr;
3712
3713         if (bp->attn_state & asserted)
3714                 BNX2X_ERR("IGU ERROR\n");
3715
3716         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3717         aeu_mask = REG_RD(bp, aeu_addr);
3718
3719         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
3720            aeu_mask, asserted);
3721         aeu_mask &= ~(asserted & 0x3ff);
3722         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3723
3724         REG_WR(bp, aeu_addr, aeu_mask);
3725         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3726
3727         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3728         bp->attn_state |= asserted;
3729         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3730
3731         if (asserted & ATTN_HARD_WIRED_MASK) {
3732                 if (asserted & ATTN_NIG_FOR_FUNC) {
3733
3734                         bnx2x_acquire_phy_lock(bp);
3735
3736                         /* save nig interrupt mask */
3737                         nig_mask = REG_RD(bp, nig_int_mask_addr);
3738
3739                         /* If nig_mask is not set, no need to call the update
3740                          * function.
3741                          */
3742                         if (nig_mask) {
3743                                 REG_WR(bp, nig_int_mask_addr, 0);
3744
3745                                 bnx2x_link_attn(bp);
3746                         }
3747
3748                         /* handle unicore attn? */
3749                 }
3750                 if (asserted & ATTN_SW_TIMER_4_FUNC)
3751                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
3752
3753                 if (asserted & GPIO_2_FUNC)
3754                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
3755
3756                 if (asserted & GPIO_3_FUNC)
3757                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
3758
3759                 if (asserted & GPIO_4_FUNC)
3760                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
3761
3762                 if (port == 0) {
3763                         if (asserted & ATTN_GENERAL_ATTN_1) {
3764                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
3765                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
3766                         }
3767                         if (asserted & ATTN_GENERAL_ATTN_2) {
3768                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
3769                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
3770                         }
3771                         if (asserted & ATTN_GENERAL_ATTN_3) {
3772                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
3773                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
3774                         }
3775                 } else {
3776                         if (asserted & ATTN_GENERAL_ATTN_4) {
3777                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
3778                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
3779                         }
3780                         if (asserted & ATTN_GENERAL_ATTN_5) {
3781                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
3782                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
3783                         }
3784                         if (asserted & ATTN_GENERAL_ATTN_6) {
3785                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
3786                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
3787                         }
3788                 }
3789
3790         } /* if hardwired */
3791
3792         if (bp->common.int_block == INT_BLOCK_HC)
3793                 reg_addr = (HC_REG_COMMAND_REG + port*32 +
3794                             COMMAND_REG_ATTN_BITS_SET);
3795         else
3796                 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
3797
3798         DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
3799            (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
3800         REG_WR(bp, reg_addr, asserted);
3801
3802         /* now set back the mask */
3803         if (asserted & ATTN_NIG_FOR_FUNC) {
3804                 /* Verify that IGU ack through BAR was written before restoring
3805                  * NIG mask. This loop should exit after 2-3 iterations max.
3806                  */
3807                 if (bp->common.int_block != INT_BLOCK_HC) {
3808                         u32 cnt = 0, igu_acked;
3809                         do {
3810                                 igu_acked = REG_RD(bp,
3811                                                    IGU_REG_ATTENTION_ACK_BITS);
3812                         } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
3813                                  (++cnt < MAX_IGU_ATTN_ACK_TO));
3814                         if (!igu_acked)
3815                                 DP(NETIF_MSG_HW,
3816                                    "Failed to verify IGU ack on time\n");
3817                         barrier();
3818                 }
3819                 REG_WR(bp, nig_int_mask_addr, nig_mask);
3820                 bnx2x_release_phy_lock(bp);
3821         }
3822 }
3823
3824 static void bnx2x_fan_failure(struct bnx2x *bp)
3825 {
3826         int port = BP_PORT(bp);
3827         u32 ext_phy_config;
3828         /* mark the failure */
3829         ext_phy_config =
3830                 SHMEM_RD(bp,
3831                          dev_info.port_hw_config[port].external_phy_config);
3832
3833         ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
3834         ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
3835         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
3836                  ext_phy_config);
3837
3838         /* log the failure */
3839         netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shut down the card to prevent permanent damage.\n"
3840                             "Please contact OEM Support for assistance\n");
3841
3842         /* Schedule device reset (unload).
3843          * Some boards consume enough power while the driver is up to
3844          * overheat if the fan fails.
3845          */
3846         smp_mb__before_clear_bit();
3847         set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state);
3848         smp_mb__after_clear_bit();
3849         schedule_delayed_work(&bp->sp_rtnl_task, 0);
3850 }
3851
3852 static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
3853 {
3854         int port = BP_PORT(bp);
3855         int reg_offset;
3856         u32 val;
3857
3858         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
3859                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
3860
3861         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
3862
3863                 val = REG_RD(bp, reg_offset);
3864                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
3865                 REG_WR(bp, reg_offset, val);
3866
3867                 BNX2X_ERR("SPIO5 hw attention\n");
3868
3869                 /* Fan failure attention */
3870                 bnx2x_hw_reset_phy(&bp->link_params);
3871                 bnx2x_fan_failure(bp);
3872         }
3873
3874         if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) {
3875                 bnx2x_acquire_phy_lock(bp);
3876                 bnx2x_handle_module_detect_int(&bp->link_params);
3877                 bnx2x_release_phy_lock(bp);
3878         }
3879
3880         if (attn & HW_INTERRUT_ASSERT_SET_0) {
3881
3882                 val = REG_RD(bp, reg_offset);
3883                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
3884                 REG_WR(bp, reg_offset, val);
3885
3886                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
3887                           (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
3888                 bnx2x_panic();
3889         }
3890 }
3891
3892 static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3893 {
3894         u32 val;
3895
3896         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
3897
3898                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3899                 BNX2X_ERR("DB hw attention 0x%x\n", val);
3900                 /* DORQ discard attention */
3901                 if (val & 0x2)
3902                         BNX2X_ERR("FATAL error from DORQ\n");
3903         }
3904
3905         if (attn & HW_INTERRUT_ASSERT_SET_1) {
3906
3907                 int port = BP_PORT(bp);
3908                 int reg_offset;
3909
3910                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3911                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3912
3913                 val = REG_RD(bp, reg_offset);
3914                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3915                 REG_WR(bp, reg_offset, val);
3916
3917                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
3918                           (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
3919                 bnx2x_panic();
3920         }
3921 }
3922
3923 static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3924 {
3925         u32 val;
3926
3927         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3928
3929                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3930                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3931                 /* CFC error attention */
3932                 if (val & 0x2)
3933                         BNX2X_ERR("FATAL error from CFC\n");
3934         }
3935
3936         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3937                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3938                 BNX2X_ERR("PXP hw attention-0 0x%x\n", val);
3939                 /* RQ_USDMDP_FIFO_OVERFLOW */
3940                 if (val & 0x18000)
3941                         BNX2X_ERR("FATAL error from PXP\n");
3942
3943                 if (!CHIP_IS_E1x(bp)) {
3944                         val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
3945                         BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
3946                 }
3947         }
3948
3949         if (attn & HW_INTERRUT_ASSERT_SET_2) {
3950
3951                 int port = BP_PORT(bp);
3952                 int reg_offset;
3953
3954                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3955                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3956
3957                 val = REG_RD(bp, reg_offset);
3958                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3959                 REG_WR(bp, reg_offset, val);
3960
3961                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3962                           (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3963                 bnx2x_panic();
3964         }
3965 }
3966
3967 static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3968 {
3969         u32 val;
3970
3971         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3972
3973                 if (attn & BNX2X_PMF_LINK_ASSERT) {
3974                         int func = BP_FUNC(bp);
3975
3976                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3977                         bnx2x_read_mf_cfg(bp);
3978                         bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
3979                                         func_mf_config[BP_ABS_FUNC(bp)].config);
3980                         val = SHMEM_RD(bp,
3981                                        func_mb[BP_FW_MB_IDX(bp)].drv_status);
3982                         if (val & DRV_STATUS_DCC_EVENT_MASK)
3983                                 bnx2x_dcc_event(bp,
3984                                             (val & DRV_STATUS_DCC_EVENT_MASK));
3985
3986                         if (val & DRV_STATUS_SET_MF_BW)
3987                                 bnx2x_set_mf_bw(bp);
3988
3989                         if (val & DRV_STATUS_DRV_INFO_REQ)
3990                                 bnx2x_handle_drv_info_req(bp);
3991
3992                         if (val & DRV_STATUS_VF_DISABLED)
3993                                 bnx2x_vf_handle_flr_event(bp);
3994
3995                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3996                                 bnx2x_pmf_update(bp);
3997
3998                         if (bp->port.pmf &&
3999                             (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
4000                                 bp->dcbx_enabled > 0)
4001                                 /* start dcbx state machine */
4002                                 bnx2x_dcbx_set_params(bp,
4003                                         BNX2X_DCBX_STATE_NEG_RECEIVED);
4004                         if (val & DRV_STATUS_AFEX_EVENT_MASK)
4005                                 bnx2x_handle_afex_cmd(bp,
4006                                         val & DRV_STATUS_AFEX_EVENT_MASK);
4007                         if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
4008                                 bnx2x_handle_eee_event(bp);
4009                         if (bp->link_vars.periodic_flags &
4010                             PERIODIC_FLAGS_LINK_EVENT) {
4011                                 /*  sync with link */
4012                                 bnx2x_acquire_phy_lock(bp);
4013                                 bp->link_vars.periodic_flags &=
4014                                         ~PERIODIC_FLAGS_LINK_EVENT;
4015                                 bnx2x_release_phy_lock(bp);
4016                                 if (IS_MF(bp))
4017                                         bnx2x_link_sync_notify(bp);
4018                                 bnx2x_link_report(bp);
4019                         }
4020                         /* Always call it here: bnx2x_link_report() will
4021                          * prevent duplicate link indications.
4022                          */
4023                         bnx2x__link_status_update(bp);
4024                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
4025
4026                         BNX2X_ERR("MC assert!\n");
4027                         bnx2x_mc_assert(bp);
4028                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
4029                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
4030                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
4031                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
4032                         bnx2x_panic();
4033
4034                 } else if (attn & BNX2X_MCP_ASSERT) {
4035
4036                         BNX2X_ERR("MCP assert!\n");
4037                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
4038                         bnx2x_fw_dump(bp);
4039
4040                 } else
4041                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
4042         }
4043
4044         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
4045                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
4046                 if (attn & BNX2X_GRC_TIMEOUT) {
4047                         val = CHIP_IS_E1(bp) ? 0 :
4048                                         REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
4049                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
4050                 }
4051                 if (attn & BNX2X_GRC_RSV) {
4052                         val = CHIP_IS_E1(bp) ? 0 :
4053                                         REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
4054                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
4055                 }
4056                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
4057         }
4058 }
4059
4060 /*
4061  * Bits map:
4062  * 0-7   - Engine0 load counter.
4063  * 8-15  - Engine1 load counter.
4064  * 16    - Engine0 RESET_IN_PROGRESS bit.
4065  * 17    - Engine1 RESET_IN_PROGRESS bit.
4066  * 18    - Engine0 ONE_IS_LOADED. Set when there is at least one active function
4067  *         on the engine
4068  * 19    - Engine1 ONE_IS_LOADED.
4069  * 20    - Chip reset flow bit. When set, a non-leader must wait for the
4070  *         leaders of both engines to complete (check both RESET_IN_PROGRESS
4071  *         bits, not just the one belonging to its engine).
4072  *
4073  */
4074 #define BNX2X_RECOVERY_GLOB_REG         MISC_REG_GENERIC_POR_1
4075
4076 #define BNX2X_PATH0_LOAD_CNT_MASK       0x000000ff
4077 #define BNX2X_PATH0_LOAD_CNT_SHIFT      0
4078 #define BNX2X_PATH1_LOAD_CNT_MASK       0x0000ff00
4079 #define BNX2X_PATH1_LOAD_CNT_SHIFT      8
4080 #define BNX2X_PATH0_RST_IN_PROG_BIT     0x00010000
4081 #define BNX2X_PATH1_RST_IN_PROG_BIT     0x00020000
4082 #define BNX2X_GLOBAL_RESET_BIT          0x00040000
4083
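/* Illustrative sketch (not part of the original file): extracting a field
 * from the recovery register using the masks/shifts above - the same
 * read-mask-shift pattern bnx2x_set_pf_load() uses below.
 */
#if 0   /* example only */
static u32 example_path0_load_cnt(struct bnx2x *bp)
{
        u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);

        return (val & BNX2X_PATH0_LOAD_CNT_MASK) >> BNX2X_PATH0_LOAD_CNT_SHIFT;
}
#endif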
4084 /*
4085  * Set the GLOBAL_RESET bit.
4086  *
4087  * Should be run under rtnl lock
4088  */
4089 void bnx2x_set_reset_global(struct bnx2x *bp)
4090 {
4091         u32 val;
4092         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4093         val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4094         REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
4095         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4096 }
4097
4098 /*
4099  * Clear the GLOBAL_RESET bit.
4100  *
4101  * Should be run under rtnl lock
4102  */
4103 static void bnx2x_clear_reset_global(struct bnx2x *bp)
4104 {
4105         u32 val;
4106         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4107         val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4108         REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
4109         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4110 }
4111
4112 /*
4113  * Checks the GLOBAL_RESET bit.
4114  *
4115  * Should be run under rtnl lock
4116  */
4117 static bool bnx2x_reset_is_global(struct bnx2x *bp)
4118 {
4119         u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4120
4121         DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
4122         return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false;
4123 }
4124
4125 /*
4126  * Clear RESET_IN_PROGRESS bit for the current engine.
4127  *
4128  * Should be run under rtnl lock
4129  */
4130 static void bnx2x_set_reset_done(struct bnx2x *bp)
4131 {
4132         u32 val;
4133         u32 bit = BP_PATH(bp) ?
4134                 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4135         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4136         val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4137
4138         /* Clear the bit */
4139         val &= ~bit;
4140         REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4141
4142         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4143 }
4144
4145 /*
4146  * Set RESET_IN_PROGRESS for the current engine.
4147  *
4148  * Should be run under rtnl lock
4149  */
4150 void bnx2x_set_reset_in_progress(struct bnx2x *bp)
4151 {
4152         u32 val;
4153         u32 bit = BP_PATH(bp) ?
4154                 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4155         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4156         val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4157
4158         /* Set the bit */
4159         val |= bit;
4160         REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4161         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4162 }
4163
4164 /*
4165  * Checks the RESET_IN_PROGRESS bit for the given engine.
4166  * Should be run under rtnl lock
4167  */
4168 bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
4169 {
4170         u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4171         u32 bit = engine ?
4172                 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4173
4174         /* return false if bit is set */
4175         return (val & bit) ? false : true;
4176 }
4177
4178 /*
4179  * Set the load bit for the current PF.
4180  *
4181  * Should be run under rtnl lock
4182  */
4183 void bnx2x_set_pf_load(struct bnx2x *bp)
4184 {
4185         u32 val1, val;
4186         u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
4187                              BNX2X_PATH0_LOAD_CNT_MASK;
4188         u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4189                              BNX2X_PATH0_LOAD_CNT_SHIFT;
4190
4191         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4192         val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4193
4194         DP(NETIF_MSG_IFUP, "Old GEN_REG_VAL=0x%08x\n", val);
4195
4196         /* get the current counter value */
4197         val1 = (val & mask) >> shift;
4198
4199         /* set bit of that PF */
4200         val1 |= (1 << bp->pf_num);
4201
4202         /* clear the old value */
4203         val &= ~mask;
4204
4205         /* set the new one */
4206         val |= ((val1 << shift) & mask);
4207
4208         REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4209         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4210 }
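
/* Editor's note: a worked example of the read-modify-write above, with
 * illustrative values. For PF2 on path 0 and an old register value of
 * 0x00000001 (only PF0 loaded):
 *   val1 = (0x00000001 & BNX2X_PATH0_LOAD_CNT_MASK) >> 0 = 0x01
 *   val1 |= (1 << 2)                                     = 0x05
 * so the new register value is 0x00000005, marking PF0 and PF2 as
 * loaded on engine 0.
 */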
4211
4212 /**
4213  * bnx2x_clear_pf_load - clear pf load mark
4214  *
4215  * @bp:         driver handle
4216  *
4217  * Should be run under rtnl lock.
4218  * Clears the load bit of the current PF in the per-engine load mask.
4219  * Returns whether other functions on the engine are still loaded.
4220  */
4221 bool bnx2x_clear_pf_load(struct bnx2x *bp)
4222 {
4223         u32 val1, val;
4224         u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
4225                              BNX2X_PATH0_LOAD_CNT_MASK;
4226         u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4227                              BNX2X_PATH0_LOAD_CNT_SHIFT;
4228
4229         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4230         val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4231         DP(NETIF_MSG_IFDOWN, "Old GEN_REG_VAL=0x%08x\n", val);
4232
4233         /* get the current counter value */
4234         val1 = (val & mask) >> shift;
4235
4236         /* clear bit of that PF */
4237         val1 &= ~(1 << bp->pf_num);
4238
4239         /* clear the old value */
4240         val &= ~mask;
4241
4242         /* set the new one */
4243         val |= ((val1 << shift) & mask);
4244
4245         REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4246         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4247         return val1 != 0;
4248 }
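
/* Editor's note: a minimal usage sketch, hypothetical and for illustration
 * only. The return value tells an unload path whether it was the last
 * loaded function on its engine:
 *
 *	if (!bnx2x_clear_pf_load(bp))
 *		bnx2x_set_reset_in_progress(bp);
 */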
4249
4250 /*
4251  * Read the load status for the given engine.
4252  *
4253  * Should be run under rtnl lock
4254  */
4255 static bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
4256 {
4257         u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
4258                              BNX2X_PATH0_LOAD_CNT_MASK);
4259         u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4260                              BNX2X_PATH0_LOAD_CNT_SHIFT);
4261         u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4262
4263         DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "GLOB_REG=0x%08x\n", val);
4264
4265         val = (val & mask) >> shift;
4266
4267         DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "load mask for engine %d = 0x%x\n",
4268            engine, val);
4269
4270         return val != 0;
4271 }
4272
4273 static void _print_parity(struct bnx2x *bp, u32 reg)
4274 {
4275         pr_cont(" [0x%08x] ", REG_RD(bp, reg));
4276 }
4277
4278 static void _print_next_block(int idx, const char *blk)
4279 {
4280         pr_cont("%s%s", idx ? ", " : "", blk);
4281 }
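
/* Editor's note: together with the netdev_err() issued below, these helpers
 * build one continued log line of the form (values illustrative):
 *
 *   Parity errors detected in blocks: BRB [0x00000001], PARSER [0x00000002]
 */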
4282
4283 static int bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
4284                                             int par_num, bool print)
4285 {
4286         int i = 0;
4287         u32 cur_bit = 0;
4288         for (i = 0; sig; i++) {
4289                 cur_bit = ((u32)0x1 << i);
4290                 if (sig & cur_bit) {
4291                         switch (cur_bit) {
4292                         case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
4293                                 if (print) {
4294                                         _print_next_block(par_num++, "BRB");
4295                                         _print_parity(bp,
4296                                                       BRB1_REG_BRB1_PRTY_STS);
4297                                 }
4298                                 break;
4299                         case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
4300                                 if (print) {
4301                                         _print_next_block(par_num++, "PARSER");
4302                                         _print_parity(bp, PRS_REG_PRS_PRTY_STS);
4303                                 }
4304                                 break;
4305                         case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
4306                                 if (print) {
4307                                         _print_next_block(par_num++, "TSDM");
4308                                         _print_parity(bp,
4309                                                       TSDM_REG_TSDM_PRTY_STS);
4310                                 }
4311                                 break;
4312                         case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
4313                                 if (print) {
4314                                         _print_next_block(par_num++,
4315                                                           "SEARCHER");
4316                                         _print_parity(bp, SRC_REG_SRC_PRTY_STS);
4317                                 }
4318                                 break;
4319                         case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
4320                                 if (print) {
4321                                         _print_next_block(par_num++, "TCM");
4322                                         _print_parity(bp,
4323                                                       TCM_REG_TCM_PRTY_STS);
4324                                 }
4325                                 break;
4326                         case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
4327                                 if (print) {
4328                                         _print_next_block(par_num++, "TSEMI");
4329                                         _print_parity(bp,
4330                                                       TSEM_REG_TSEM_PRTY_STS_0);
4331                                         _print_parity(bp,
4332                                                       TSEM_REG_TSEM_PRTY_STS_1);
4333                                 }
4334                                 break;
4335                         case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
4336                                 if (print) {
4337                                         _print_next_block(par_num++, "XPB");
4338                                         _print_parity(bp, GRCBASE_XPB +
4339                                                           PB_REG_PB_PRTY_STS);
4340                                 }
4341                                 break;
4342                         }
4343
4344                         /* Clear the bit */
4345                         sig &= ~cur_bit;
4346                 }
4347         }
4348
4349         return par_num;
4350 }
4351
4352 static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
4353                                             int par_num, bool *global,
4354                                             bool print)
4355 {
4356         int i = 0;
4357         u32 cur_bit = 0;
4358         for (i = 0; sig; i++) {
4359                 cur_bit = ((u32)0x1 << i);
4360                 if (sig & cur_bit) {
4361                         switch (cur_bit) {
4362                         case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
4363                                 if (print) {
4364                                         _print_next_block(par_num++, "PBF");
4365                                         _print_parity(bp, PBF_REG_PBF_PRTY_STS);
4366                                 }
4367                                 break;
4368                         case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
4369                                 if (print) {
4370                                         _print_next_block(par_num++, "QM");
4371                                         _print_parity(bp, QM_REG_QM_PRTY_STS);
4372                                 }
4373                                 break;
4374                         case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
4375                                 if (print) {
4376                                         _print_next_block(par_num++, "TM");
4377                                         _print_parity(bp, TM_REG_TM_PRTY_STS);
4378                                 }
4379                                 break;
4380                         case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
4381                                 if (print) {
4382                                         _print_next_block(par_num++, "XSDM");
4383                                         _print_parity(bp,
4384                                                       XSDM_REG_XSDM_PRTY_STS);
4385                                 }
4386                                 break;
4387                         case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
4388                                 if (print) {
4389                                         _print_next_block(par_num++, "XCM");
4390                                         _print_parity(bp, XCM_REG_XCM_PRTY_STS);
4391                                 }
4392                                 break;
4393                         case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
4394                                 if (print) {
4395                                         _print_next_block(par_num++, "XSEMI");
4396                                         _print_parity(bp,
4397                                                       XSEM_REG_XSEM_PRTY_STS_0);
4398                                         _print_parity(bp,
4399                                                       XSEM_REG_XSEM_PRTY_STS_1);
4400                                 }
4401                                 break;
4402                         case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
4403                                 if (print) {
4404                                         _print_next_block(par_num++,
4405                                                           "DOORBELLQ");
4406                                         _print_parity(bp,
4407                                                       DORQ_REG_DORQ_PRTY_STS);
4408                                 }
4409                                 break;
4410                         case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
4411                                 if (print) {
4412                                         _print_next_block(par_num++, "NIG");
4413                                         if (CHIP_IS_E1x(bp)) {
4414                                                 _print_parity(bp,
4415                                                         NIG_REG_NIG_PRTY_STS);
4416                                         } else {
4417                                                 _print_parity(bp,
4418                                                         NIG_REG_NIG_PRTY_STS_0);
4419                                                 _print_parity(bp,
4420                                                         NIG_REG_NIG_PRTY_STS_1);
4421                                         }
4422                                 }
4423                                 break;
4424                         case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
4425                                 if (print)
4426                                         _print_next_block(par_num++,
4427                                                           "VAUX PCI CORE");
4428                                 *global = true;
4429                                 break;
4430                         case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
4431                                 if (print) {
4432                                         _print_next_block(par_num++, "DEBUG");
4433                                         _print_parity(bp, DBG_REG_DBG_PRTY_STS);
4434                                 }
4435                                 break;
4436                         case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
4437                                 if (print) {
4438                                         _print_next_block(par_num++, "USDM");
4439                                         _print_parity(bp,
4440                                                       USDM_REG_USDM_PRTY_STS);
4441                                 }
4442                                 break;
4443                         case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
4444                                 if (print) {
4445                                         _print_next_block(par_num++, "UCM");
4446                                         _print_parity(bp, UCM_REG_UCM_PRTY_STS);
4447                                 }
4448                                 break;
4449                         case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
4450                                 if (print) {
4451                                         _print_next_block(par_num++, "USEMI");
4452                                         _print_parity(bp,
4453                                                       USEM_REG_USEM_PRTY_STS_0);
4454                                         _print_parity(bp,
4455                                                       USEM_REG_USEM_PRTY_STS_1);
4456                                 }
4457                                 break;
4458                         case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
4459                                 if (print) {
4460                                         _print_next_block(par_num++, "UPB");
4461                                         _print_parity(bp, GRCBASE_UPB +
4462                                                           PB_REG_PB_PRTY_STS);
4463                                 }
4464                                 break;
4465                         case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
4466                                 if (print) {
4467                                         _print_next_block(par_num++, "CSDM");
4468                                         _print_parity(bp,
4469                                                       CSDM_REG_CSDM_PRTY_STS);
4470                                 }
4471                                 break;
4472                         case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
4473                                 if (print) {
4474                                         _print_next_block(par_num++, "CCM");
4475                                         _print_parity(bp, CCM_REG_CCM_PRTY_STS);
4476                                 }
4477                                 break;
4478                         }
4479
4480                         /* Clear the bit */
4481                         sig &= ~cur_bit;
4482                 }
4483         }
4484
4485         return par_num;
4486 }
4487
4488 static int bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
4489                                             int par_num, bool print)
4490 {
4491         int i = 0;
4492         u32 cur_bit = 0;
4493         for (i = 0; sig; i++) {
4494                 cur_bit = ((u32)0x1 << i);
4495                 if (sig & cur_bit) {
4496                         switch (cur_bit) {
4497                         case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
4498                                 if (print) {
4499                                         _print_next_block(par_num++, "CSEMI");
4500                                         _print_parity(bp,
4501                                                       CSEM_REG_CSEM_PRTY_STS_0);
4502                                         _print_parity(bp,
4503                                                       CSEM_REG_CSEM_PRTY_STS_1);
4504                                 }
4505                                 break;
4506                         case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
4507                                 if (print) {
4508                                         _print_next_block(par_num++, "PXP");
4509                                         _print_parity(bp, PXP_REG_PXP_PRTY_STS);
4510                                         _print_parity(bp,
4511                                                       PXP2_REG_PXP2_PRTY_STS_0);
4512                                         _print_parity(bp,
4513                                                       PXP2_REG_PXP2_PRTY_STS_1);
4514                                 }
4515                                 break;
4516                         case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
4517                                 if (print)
4518                                         _print_next_block(par_num++,
4519                                         "PXPPCICLOCKCLIENT");
4520                                 break;
4521                         case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
4522                                 if (print) {
4523                                         _print_next_block(par_num++, "CFC");
4524                                         _print_parity(bp,
4525                                                       CFC_REG_CFC_PRTY_STS);
4526                                 }
4527                                 break;
4528                         case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
4529                                 if (print) {
4530                                         _print_next_block(par_num++, "CDU");
4531                                         _print_parity(bp, CDU_REG_CDU_PRTY_STS);
4532                                 }
4533                                 break;
4534                         case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
4535                                 if (print) {
4536                                         _print_next_block(par_num++, "DMAE");
4537                                         _print_parity(bp,
4538                                                       DMAE_REG_DMAE_PRTY_STS);
4539                                 }
4540                                 break;
4541                         case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
4542                                 if (print) {
4543                                         _print_next_block(par_num++, "IGU");
4544                                         if (CHIP_IS_E1x(bp))
4545                                                 _print_parity(bp,
4546                                                         HC_REG_HC_PRTY_STS);
4547                                         else
4548                                                 _print_parity(bp,
4549                                                         IGU_REG_IGU_PRTY_STS);
4550                                 }
4551                                 break;
4552                         case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
4553                                 if (print) {
4554                                         _print_next_block(par_num++, "MISC");
4555                                         _print_parity(bp,
4556                                                       MISC_REG_MISC_PRTY_STS);
4557                                 }
4558                                 break;
4559                         }
4560
4561                         /* Clear the bit */
4562                         sig &= ~cur_bit;
4563                 }
4564         }
4565
4566         return par_num;
4567 }
4568
4569 static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
4570                                            bool *global, bool print)
4571 {
4572         int i = 0;
4573         u32 cur_bit = 0;
4574         for (i = 0; sig; i++) {
4575                 cur_bit = ((u32)0x1 << i);
4576                 if (sig & cur_bit) {
4577                         switch (cur_bit) {
4578                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
4579                                 if (print)
4580                                         _print_next_block(par_num++, "MCP ROM");
4581                                 *global = true;
4582                                 break;
4583                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
4584                                 if (print)
4585                                         _print_next_block(par_num++,
4586                                                           "MCP UMP RX");
4587                                 *global = true;
4588                                 break;
4589                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
4590                                 if (print)
4591                                         _print_next_block(par_num++,
4592                                                           "MCP UMP TX");
4593                                 *global = true;
4594                                 break;
4595                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
4596                                 if (print)
4597                                         _print_next_block(par_num++,
4598                                                           "MCP SCPAD");
4599                                 *global = true;
4600                                 break;
4601                         }
4602
4603                         /* Clear the bit */
4604                         sig &= ~cur_bit;
4605                 }
4606         }
4607
4608         return par_num;
4609 }
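
/* Editor's note: each MCP-latched parity source above also sets *global,
 * since recovering from an MCP parity error requires the chip-wide reset
 * flow tracked by BNX2X_GLOBAL_RESET_BIT rather than a per-engine reset.
 */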
4610
4611 static int bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
4612                                             int par_num, bool print)
4613 {
4614         int i = 0;
4615         u32 cur_bit = 0;
4616         for (i = 0; sig; i++) {
4617                 cur_bit = ((u32)0x1 << i);
4618                 if (sig & cur_bit) {
4619                         switch (cur_bit) {
4620                         case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
4621                                 if (print) {
4622                                         _print_next_block(par_num++, "PGLUE_B");
4623                                         _print_parity(bp,
4624                                                 PGLUE_B_REG_PGLUE_B_PRTY_STS);
4625                                 }
4626                                 break;
4627                         case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
4628                                 if (print) {
4629                                         _print_next_block(par_num++, "ATC");
4630                                         _print_parity(bp,
4631                                                       ATC_REG_ATC_PRTY_STS);
4632                                 }
4633                                 break;
4634                         }
4635
4636                         /* Clear the bit */
4637                         sig &= ~cur_bit;
4638                 }
4639         }
4640
4641         return par_num;
4642 }
4643
4644 static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
4645                               u32 *sig)
4646 {
4647         if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
4648             (sig[1] & HW_PRTY_ASSERT_SET_1) ||
4649             (sig[2] & HW_PRTY_ASSERT_SET_2) ||
4650             (sig[3] & HW_PRTY_ASSERT_SET_3) ||
4651             (sig[4] & HW_PRTY_ASSERT_SET_4)) {
4652                 int par_num = 0;
4653                 DP(NETIF_MSG_HW, "Parity error: HW block parity attention:\n"
4654                                  "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
4655                           sig[0] & HW_PRTY_ASSERT_SET_0,
4656                           sig[1] & HW_PRTY_ASSERT_SET_1,
4657                           sig[2] & HW_PRTY_ASSERT_SET_2,
4658                           sig[3] & HW_PRTY_ASSERT_SET_3,
4659                           sig[4] & HW_PRTY_ASSERT_SET_4);
4660                 if (print)
4661                         netdev_err(bp->dev,
4662                                    "Parity errors detected in blocks: ");
4663                 par_num = bnx2x_check_blocks_with_parity0(bp,
4664                         sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print);
4665                 par_num = bnx2x_check_blocks_with_parity1(bp,
4666                         sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print);
4667                 par_num = bnx2x_check_blocks_with_parity2(bp,
4668                         sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print);
4669                 par_num = bnx2x_check_blocks_with_parity3(
4670                         sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print);
4671                 par_num = bnx2x_check_blocks_with_parity4(bp,
4672                         sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print);
4673
4674                 if (print)
4675                         pr_cont("\n");
4676
4677                 return true;
4678         } else
4679                 return false;
4680 }
4681
4682 /**
4683  * bnx2x_chk_parity_attn - checks for parity attentions.
4684  *
4685  * @bp:         driver handle
4686  * @global:     set to true if a global attention was detected
4687  * @print:      whether to report the parity attention in syslog
4688  */
4689 bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
4690 {
4691         struct attn_route attn = { {0} };
4692         int port = BP_PORT(bp);
4693
4694         attn.sig[0] = REG_RD(bp,
4695                 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
4696                              port*4);
4697         attn.sig[1] = REG_RD(bp,
4698                 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
4699                              port*4);
4700         attn.sig[2] = REG_RD(bp,
4701                 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
4702                              port*4);
4703         attn.sig[3] = REG_RD(bp,
4704                 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
4705                              port*4);
4706         /* Since MCP attentions can't be disabled inside the block, we need to
4707          * read AEU registers to see whether they're currently disabled
4708          */
4709         attn.sig[3] &= ((REG_RD(bp,
4710                                 !port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
4711                                       : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0) &
4712                          MISC_AEU_ENABLE_MCP_PRTY_BITS) |
4713                         ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
4714
4715         if (!CHIP_IS_E1x(bp))
4716                 attn.sig[4] = REG_RD(bp,
4717                         MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 +
4718                                      port*4);
4719
4720         return bnx2x_parity_attn(bp, global, print, attn.sig);
4721 }
4722
4723 static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
4724 {
4725         u32 val;
4726         if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
4727
4728                 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
4729                 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
4730                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
4731                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
4732                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
4733                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
4734                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
4735                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
4736                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
4737                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
4738                 if (val &
4739                     PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
4740                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
4741                 if (val &
4742                     PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
4743                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
4744                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
4745                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
4746                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
4747                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
4748                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
4749                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
4750         }
4751         if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
4752                 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
4753                 BNX2X_ERR("ATC hw attention 0x%x\n", val);
4754                 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
4755                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
4756                 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
4757                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
4758                 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
4759                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
4760                 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
4761                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
4762                 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
4763                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
4764                 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
4765                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
4766         }
4767
4768         if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
4769                     AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
4770                 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
4771                 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
4772                     AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
4773         }
4774 }
4775
4776 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
4777 {
4778         struct attn_route attn, *group_mask;
4779         int port = BP_PORT(bp);
4780         int index;
4781         u32 reg_addr;
4782         u32 val;
4783         u32 aeu_mask;
4784         bool global = false;
4785
4786         /* Need to take the HW lock because the MCP or the other port
4787            might also try to handle this event */
4788         bnx2x_acquire_alr(bp);
4789
4790         if (bnx2x_chk_parity_attn(bp, &global, true)) {
4791 #ifndef BNX2X_STOP_ON_ERROR
4792                 bp->recovery_state = BNX2X_RECOVERY_INIT;
4793                 schedule_delayed_work(&bp->sp_rtnl_task, 0);
4794                 /* Disable HW interrupts */
4795                 bnx2x_int_disable(bp);
4796                 /* In case of parity errors don't handle the attentions, so
4797                  * that the other function can also "see" the parity errors.
4798                  */
4799 #else
4800                 bnx2x_panic();
4801 #endif
4802                 bnx2x_release_alr(bp);
4803                 return;
4804         }
4805
4806         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
4807         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
4808         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
4809         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
4810         if (!CHIP_IS_E1x(bp))
4811                 attn.sig[4] =
4812                       REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
4813         else
4814                 attn.sig[4] = 0;
4815
4816         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
4817            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
4818
4819         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4820                 if (deasserted & (1 << index)) {
4821                         group_mask = &bp->attn_group[index];
4822
4823                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x %08x\n",
4824                            index,
4825                            group_mask->sig[0], group_mask->sig[1],
4826                            group_mask->sig[2], group_mask->sig[3],
4827                            group_mask->sig[4]);
4828
4829                         bnx2x_attn_int_deasserted4(bp,
4830                                         attn.sig[4] & group_mask->sig[4]);
4831                         bnx2x_attn_int_deasserted3(bp,
4832                                         attn.sig[3] & group_mask->sig[3]);
4833                         bnx2x_attn_int_deasserted1(bp,
4834                                         attn.sig[1] & group_mask->sig[1]);
4835                         bnx2x_attn_int_deasserted2(bp,
4836                                         attn.sig[2] & group_mask->sig[2]);
4837                         bnx2x_attn_int_deasserted0(bp,
4838                                         attn.sig[0] & group_mask->sig[0]);
4839                 }
4840         }
4841
4842         bnx2x_release_alr(bp);
4843
4844         if (bp->common.int_block == INT_BLOCK_HC)
4845                 reg_addr = (HC_REG_COMMAND_REG + port*32 +
4846                             COMMAND_REG_ATTN_BITS_CLR);
4847         else
4848                 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
4849
4850         val = ~deasserted;
4851         DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
4852            (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
4853         REG_WR(bp, reg_addr, val);
4854
4855         if (~bp->attn_state & deasserted)
4856                 BNX2X_ERR("IGU ERROR\n");
4857
4858         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4859                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
4860
4861         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
4862         aeu_mask = REG_RD(bp, reg_addr);
4863
4864         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
4865            aeu_mask, deasserted);
4866         aeu_mask |= (deasserted & 0x3ff);
4867         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
4868
4869         REG_WR(bp, reg_addr, aeu_mask);
4870         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
4871
4872         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
4873         bp->attn_state &= ~deasserted;
4874         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
4875 }
4876
4877 static void bnx2x_attn_int(struct bnx2x *bp)
4878 {
4879         /* read local copy of bits */
4880         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
4881                                                                 attn_bits);
4882         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
4883                                                                 attn_bits_ack);
4884         u32 attn_state = bp->attn_state;
4885
4886         /* look for changed bits */
4887         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
4888         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
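
        /* Editor's note, illustrative single-bit reading of the two lines
         * above: a bit present in attn_bits but not yet in attn_ack or
         * attn_state is newly asserted; a bit present in attn_ack and
         * attn_state but gone from attn_bits is newly deasserted.
         */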
4889
4890         DP(NETIF_MSG_HW,
4891            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
4892            attn_bits, attn_ack, asserted, deasserted);
4893
4894         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
4895                 BNX2X_ERR("BAD attention state\n");
4896
4897         /* handle bits that were raised */
4898         if (asserted)
4899                 bnx2x_attn_int_asserted(bp, asserted);
4900
4901         if (deasserted)
4902                 bnx2x_attn_int_deasserted(bp, deasserted);
4903 }
4904
4905 void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
4906                       u16 index, u8 op, u8 update)
4907 {
4908         u32 igu_addr = bp->igu_base_addr;
4909         igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
4910         bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
4911                              igu_addr);
4912 }
4913
4914 static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
4915 {
4916         /* No memory barriers */
4917         storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
4918         mmiowb(); /* keep prod updates ordered */
4919 }
4920
4921 static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
4922                                      union event_ring_elem *elem)
4923 {
4924         u8 err = elem->message.error;
4925
4926         if (!bp->cnic_eth_dev.starting_cid ||
4927             (cid < bp->cnic_eth_dev.starting_cid &&
4928              cid != bp->cnic_eth_dev.iscsi_l2_cid))
4929                 return 1;
4930
4931         DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
4932
4933         if (unlikely(err)) {
4934
4935                 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
4936                           cid);
4937                 bnx2x_panic_dump(bp, false);
4938         }
4939         bnx2x_cnic_cfc_comp(bp, cid, err);
4940         return 0;
4941 }
4942
4943 static void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
4944 {
4945         struct bnx2x_mcast_ramrod_params rparam;
4946         int rc;
4947
4948         memset(&rparam, 0, sizeof(rparam));
4949
4950         rparam.mcast_obj = &bp->mcast_obj;
4951
4952         netif_addr_lock_bh(bp->dev);
4953
4954         /* Clear pending state for the last command */
4955         bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw);
4956
4957         /* If there are pending mcast commands, send them */
4958         if (bp->mcast_obj.check_pending(&bp->mcast_obj)) {
4959                 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
4960                 if (rc < 0)
4961                         BNX2X_ERR("Failed to send pending mcast commands: %d\n",
4962                                   rc);
4963         }
4964
4965         netif_addr_unlock_bh(bp->dev);
4966 }
4967
4968 static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
4969                                             union event_ring_elem *elem)
4970 {
4971         unsigned long ramrod_flags = 0;
4972         int rc = 0;
4973         u32 cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK;
4974         struct bnx2x_vlan_mac_obj *vlan_mac_obj;
4975
4976         /* Always push next commands out, don't wait here */
4977         __set_bit(RAMROD_CONT, &ramrod_flags);
4978
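        /* Editor's note: the echo field encodes both values used here: the
         * low BNX2X_SWCID_MASK bits hold the SW CID (extracted above) and
         * the bits from BNX2X_SWCID_SHIFT upward hold the pending-command
         * code that the switch below dispatches on.
         */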
4979         switch (le32_to_cpu((__force __le32)elem->message.data.eth_event.echo)
4980                             >> BNX2X_SWCID_SHIFT) {
4981         case BNX2X_FILTER_MAC_PENDING:
4982                 DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
4983                 if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
4984                         vlan_mac_obj = &bp->iscsi_l2_mac_obj;
4985                 else
4986                         vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
4987
4988                 break;
4989         case BNX2X_FILTER_MCAST_PENDING:
4990                 DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n");
4991                 /* This is only relevant for 57710 where multicast MACs are
4992                  * configured as unicast MACs using the same ramrod.
4993                  */
4994                 bnx2x_handle_mcast_eqe(bp);
4995                 return;
4996         default:
4997                 BNX2X_ERR("Unsupported classification command: %d\n",
4998                           elem->message.data.eth_event.echo);
4999                 return;
5000         }
5001
5002         rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags);
5003
5004         if (rc < 0)
5005                 BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
5006         else if (rc > 0)
5007                 DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n");
5008 }
5009
5010 static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
5011
5012 static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
5013 {
5014         netif_addr_lock_bh(bp->dev);
5015
5016         clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
5017
5018         /* Send the rx_mode command again if it was requested */
5019         if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
5020                 bnx2x_set_storm_rx_mode(bp);
5021         else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
5022                                     &bp->sp_state))
5023                 bnx2x_set_iscsi_eth_rx_mode(bp, true);
5024         else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
5025                                     &bp->sp_state))
5026                 bnx2x_set_iscsi_eth_rx_mode(bp, false);
5027
5028         netif_addr_unlock_bh(bp->dev);
5029 }
5030
5031 static void bnx2x_after_afex_vif_lists(struct bnx2x *bp,
5032                                               union event_ring_elem *elem)
5033 {
5034         if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) {
5035                 DP(BNX2X_MSG_SP,
5036                    "afex: ramrod completed VIF LIST_GET, addrs 0x%x\n",
5037                    elem->message.data.vif_list_event.func_bit_map);
5038                 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK,
5039                         elem->message.data.vif_list_event.func_bit_map);
5040         } else if (elem->message.data.vif_list_event.echo ==
5041                    VIF_LIST_RULE_SET) {
5042                 DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n");
5043                 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0);
5044         }
5045 }
5046
5047 /* called with rtnl_lock */
5048 static void bnx2x_after_function_update(struct bnx2x *bp)
5049 {
5050         int q, rc;
5051         struct bnx2x_fastpath *fp;
5052         struct bnx2x_queue_state_params queue_params = {NULL};
5053         struct bnx2x_queue_update_params *q_update_params =
5054                 &queue_params.params.update;
5055
5056         /* Send Q update command with afex vlan removal values for all Qs */
5057         queue_params.cmd = BNX2X_Q_CMD_UPDATE;
5058
5059         /* set silent vlan removal values according to vlan mode */
5060         __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
5061                   &q_update_params->update_flags);
5062         __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
5063                   &q_update_params->update_flags);
5064         __set_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
5065
5066         /* In access mode both mask and value are 0, so all vlans are stripped */
5067         if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) {
5068                 q_update_params->silent_removal_value = 0;
5069                 q_update_params->silent_removal_mask = 0;
5070         } else {
5071                 q_update_params->silent_removal_value =
5072                         (bp->afex_def_vlan_tag & VLAN_VID_MASK);
5073                 q_update_params->silent_removal_mask = VLAN_VID_MASK;
5074         }
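
        /* Editor's note: a worked example with illustrative values. For an
         * AFEX default VLAN tag of 100 in non-access mode this yields
         * silent_removal_value = 100 and silent_removal_mask = 0xfff
         * (VLAN_VID_MASK), so only frames tagged with VLAN 100 are silently
         * stripped; in access mode value = mask = 0 matches every tag.
         */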
5075
5076         for_each_eth_queue(bp, q) {
5077                 /* Set the appropriate Queue object */
5078                 fp = &bp->fp[q];
5079                 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
5080
5081                 /* send the ramrod */
5082                 rc = bnx2x_queue_state_change(bp, &queue_params);
5083                 if (rc < 0)
5084                         BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
5085                                   q);
5086         }
5087
5088         if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) {
5089                 fp = &bp->fp[FCOE_IDX(bp)];
5090                 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
5091
5092                 /* clear pending completion bit */
5093                 __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
5094
5095                 /* mark the FCoE Q update as pending */
5096                 smp_mb__before_clear_bit();
5097                 set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
5098                 smp_mb__after_clear_bit();
5099
5100                 /* send Q update ramrod for FCoE Q */
5101                 rc = bnx2x_queue_state_change(bp, &queue_params);
5102                 if (rc < 0)
5103                         BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
5104                                   FCOE_IDX(bp));
5105         } else {
5106                 /* If no FCoE ring - ACK MCP now */
5107                 bnx2x_link_report(bp);
5108                 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
5109         }
5110 }
5111
5112 static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
5113         struct bnx2x *bp, u32 cid)
5114 {
5115         DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
5116
5117         if (CNIC_LOADED(bp) && (cid == BNX2X_FCOE_ETH_CID(bp)))
5118                 return &bnx2x_fcoe_sp_obj(bp, q_obj);
5119         else
5120                 return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
5121 }
5122
5123 static void bnx2x_eq_int(struct bnx2x *bp)
5124 {
5125         u16 hw_cons, sw_cons, sw_prod;
5126         union event_ring_elem *elem;
5127         u8 echo;
5128         u32 cid;
5129         u8 opcode;
5130         int rc, spqe_cnt = 0;
5131         struct bnx2x_queue_sp_obj *q_obj;
5132         struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
5133         struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;
5134
5135         hw_cons = le16_to_cpu(*bp->eq_cons_sb);
5136
5137         /* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
5138          * When we reach the next-page element we need to adjust hw_cons so
5139          * the loop condition below will be met. The next-page element is the
5140          * size of a regular element, hence we increment by 1.
5141          */
5142         if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
5143                 hw_cons++;
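
        /* Editor's note: e.g. assuming EQ_DESC_MAX_PAGE is 0xff, a hw_cons
         * of 0xff (the next-page element) is bumped to 0x100 here so that
         * the sw_cons walk below can catch up with it.
         */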
5144
5145         /* This function never runs in parallel with itself for a
5146          * specific bp, thus there is no need for a "paired" read memory
5147          * barrier here.
5148          */
5149         sw_cons = bp->eq_cons;
5150         sw_prod = bp->eq_prod;
5151
5152         DP(BNX2X_MSG_SP, "EQ:  hw_cons %u  sw_cons %u bp->eq_spq_left %x\n",
5153                         hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
5154
5155         for (; sw_cons != hw_cons;
5156               sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
5157
5158                 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
5159
5160                 rc = bnx2x_iov_eq_sp_event(bp, elem);
5161                 if (!rc) {
5162                         DP(BNX2X_MSG_IOV, "bnx2x_iov_eq_sp_event returned %d\n",
5163                            rc);
5164                         goto next_spqe;
5165                 }
5166
5167                 /* the element's CID originates from the FW, so it is LE */
5168                 cid = SW_CID((__force __le32)
5169                              elem->message.data.cfc_del_event.cid);
5170                 opcode = elem->message.opcode;
5171
5172                 /* handle eq element */
5173                 switch (opcode) {
5174                 case EVENT_RING_OPCODE_VF_PF_CHANNEL:
5175                         DP(BNX2X_MSG_IOV, "vf pf channel element on eq\n");
5176                         bnx2x_vf_mbx(bp, &elem->message.data.vf_pf_event);
5177                         continue;
5178
5179                 case EVENT_RING_OPCODE_STAT_QUERY:
5180                         DP(BNX2X_MSG_SP | BNX2X_MSG_STATS,
5181                            "got statistics comp event %d\n",
5182                            bp->stats_comp++);
5183                         /* nothing more to do for a statistics completion */
5184                         goto next_spqe;
5185
5186                 case EVENT_RING_OPCODE_CFC_DEL:
5187                         /* handle according to cid range */
5188                         /*
5189                          * We may want to verify here that the bp state is
5190                          * HALTING.
5191                          */
5192                         DP(BNX2X_MSG_SP,
5193                            "got delete ramrod for MULTI[%d]\n", cid);
5194
5195                         if (CNIC_LOADED(bp) &&
5196                             !bnx2x_cnic_handle_cfc_del(bp, cid, elem))
5197                                 goto next_spqe;
5198
5199                         q_obj = bnx2x_cid_to_q_obj(bp, cid);
5200
5201                         if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
5202                                 break;
5203
5204                         goto next_spqe;
5205
5206                 case EVENT_RING_OPCODE_STOP_TRAFFIC:
5207                         DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n");
5208                         if (f_obj->complete_cmd(bp, f_obj,
5209                                                 BNX2X_F_CMD_TX_STOP))
5210                                 break;
5211                         bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
5212                         goto next_spqe;
5213
5214                 case EVENT_RING_OPCODE_START_TRAFFIC:
5215                         DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n");
5216                         if (f_obj->complete_cmd(bp, f_obj,
5217                                                 BNX2X_F_CMD_TX_START))
5218                                 break;
5219                         bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
5220                         goto next_spqe;
5221
5222                 case EVENT_RING_OPCODE_FUNCTION_UPDATE:
5223                         echo = elem->message.data.function_update_event.echo;
5224                         if (echo == SWITCH_UPDATE) {
5225                                 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5226                                    "got FUNC_SWITCH_UPDATE ramrod\n");
5227                                 if (f_obj->complete_cmd(
5228                                         bp, f_obj, BNX2X_F_CMD_SWITCH_UPDATE))
5229                                         break;
5230
5231                         } else {
5232                                 DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
5233                                    "AFEX: ramrod completed FUNCTION_UPDATE\n");
5234                                 f_obj->complete_cmd(bp, f_obj,
5235                                                     BNX2X_F_CMD_AFEX_UPDATE);
5236
5237                                 /* We will perform the Queues update from
5238                                  * sp_rtnl task as all Queue SP operations
5239                                  * should run under rtnl_lock.
5240                                  */
5241                                 smp_mb__before_clear_bit();
5242                                 set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE,
5243                                         &bp->sp_rtnl_state);
5244                                 smp_mb__after_clear_bit();
5245
5246                                 schedule_delayed_work(&bp->sp_rtnl_task, 0);
5247                         }
5248
5249                         goto next_spqe;
5250
5251                 case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
5252                         f_obj->complete_cmd(bp, f_obj,
5253                                             BNX2X_F_CMD_AFEX_VIFLISTS);
5254                         bnx2x_after_afex_vif_lists(bp, elem);
5255                         goto next_spqe;
5256                 case EVENT_RING_OPCODE_FUNCTION_START:
5257                         DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5258                            "got FUNC_START ramrod\n");
5259                         if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START))
5260                                 break;
5261
5262                         goto next_spqe;
5263
5264                 case EVENT_RING_OPCODE_FUNCTION_STOP:
5265                         DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5266                            "got FUNC_STOP ramrod\n");
5267                         if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP))
5268                                 break;
5269
5270                         goto next_spqe;
5271                 }
5272
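                /* Editor's note: opcode and bp->state are OR'ed into a single
                 * composite value so that one switch can match a (ramrod
                 * opcode, driver state) pair; this relies on the opcode and
                 * BNX2X_STATE_* encodings occupying disjoint bit ranges.
                 */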
5273                 switch (opcode | bp->state) {
5274                 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5275                       BNX2X_STATE_OPEN):
5276                 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5277                       BNX2X_STATE_OPENING_WAIT4_PORT):
5278                         cid = elem->message.data.eth_event.echo &
5279                                 BNX2X_SWCID_MASK;
5280                         DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
5281                            cid);
5282                         rss_raw->clear_pending(rss_raw);
5283                         break;
5284
5285                 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
5286                 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
5287                 case (EVENT_RING_OPCODE_SET_MAC |
5288                       BNX2X_STATE_CLOSING_WAIT4_HALT):
5289                 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5290                       BNX2X_STATE_OPEN):
5291                 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5292                       BNX2X_STATE_DIAG):
5293                 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5294                       BNX2X_STATE_CLOSING_WAIT4_HALT):
5295                         DP(BNX2X_MSG_SP, "got (un)set mac ramrod\n");
5296                         bnx2x_handle_classification_eqe(bp, elem);
5297                         break;
5298
5299                 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5300                       BNX2X_STATE_OPEN):
5301                 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5302                       BNX2X_STATE_DIAG):
5303                 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5304                       BNX2X_STATE_CLOSING_WAIT4_HALT):
5305                         DP(BNX2X_MSG_SP, "got mcast ramrod\n");
5306                         bnx2x_handle_mcast_eqe(bp);
5307                         break;
5308
5309                 case (EVENT_RING_OPCODE_FILTERS_RULES |
5310                       BNX2X_STATE_OPEN):
5311                 case (EVENT_RING_OPCODE_FILTERS_RULES |
5312                       BNX2X_STATE_DIAG):
5313                 case (EVENT_RING_OPCODE_FILTERS_RULES |
5314                       BNX2X_STATE_CLOSING_WAIT4_HALT):
5315                         DP(BNX2X_MSG_SP, "got rx_mode ramrod\n");
5316                         bnx2x_handle_rx_mode_eqe(bp);
5317                         break;
5318                 default:
5319                         /* unknown event; log an error and continue */
5320                         BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n",
5321                                   elem->message.opcode, bp->state);
5322                 }
5323 next_spqe:
5324                 spqe_cnt++;
5325         } /* for */
5326
5327         smp_mb__before_atomic_inc();
5328         atomic_add(spqe_cnt, &bp->eq_spq_left);
5329
5330         bp->eq_cons = sw_cons;
5331         bp->eq_prod = sw_prod;
5332         /* Make sure the above memory writes are issued before the producer update */
5333         smp_wmb();
5334
5335         /* update producer */
5336         bnx2x_update_eq_prod(bp, bp->eq_prod);
5337 }
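
/* A note on the credit handling just above (illustrative, not part of the
 * driver flow): EQ credits are returned in one batch per pass rather than
 * per element. If this pass consumed three EQEs, spqe_cnt == 3 and
 *
 *	atomic_add(3, &bp->eq_spq_left);
 *
 * releases all three credits at once, with the preceding barrier ordering
 * the element processing before the credit release.
 */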
5338
5339 static void bnx2x_sp_task(struct work_struct *work)
5340 {
5341         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
5342
5343         DP(BNX2X_MSG_SP, "sp task invoked\n");
5344
5345         /* make sure the atomic interrupt_occurred has been written */
5346         smp_rmb();
5347         if (atomic_read(&bp->interrupt_occurred)) {
5348
5349                 /* what work needs to be performed? */
5350                 u16 status = bnx2x_update_dsb_idx(bp);
5351
5352                 DP(BNX2X_MSG_SP, "status %x\n", status);
5353                 DP(BNX2X_MSG_SP, "setting interrupt_occurred to 0\n");
5354                 atomic_set(&bp->interrupt_occurred, 0);
5355
5356                 /* HW attentions */
5357                 if (status & BNX2X_DEF_SB_ATT_IDX) {
5358                         bnx2x_attn_int(bp);
5359                         status &= ~BNX2X_DEF_SB_ATT_IDX;
5360                 }
5361
5362                 /* SP events: STAT_QUERY and others */
5363                 if (status & BNX2X_DEF_SB_IDX) {
5364                         struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
5365
5366                         if (FCOE_INIT(bp) &&
5367                             (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
5368                                 /* Prevent local bottom-halves from running as
5369                                  * we are going to change the local NAPI list.
5370                                  */
5371                                 local_bh_disable();
5372                                 napi_schedule(&bnx2x_fcoe(bp, napi));
5373                                 local_bh_enable();
5374                         }
5375
5376                         /* Handle EQ completions */
5377                         bnx2x_eq_int(bp);
5378                         bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
5379                                      le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
5380
5381                         status &= ~BNX2X_DEF_SB_IDX;
5382                 }
5383
5384                 /* if status is non-zero then perhaps something went wrong */
5385                 if (unlikely(status))
5386                         DP(BNX2X_MSG_SP,
5387                            "got an unknown interrupt! (status 0x%x)\n", status);
5388
5389                 /* ack status block only if something was actually handled */
5390                 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
5391                              le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
5392         }
5393
5394         /* must be called after the EQ processing (since eq leads to sriov
5395          * ramrod completion flows).
5396          * This flow may have been scheduled by the arrival of a ramrod
5397          * completion, or by the sriov code rescheduling itself.
5398          */
5399         bnx2x_iov_sp_task(bp);
5400
5401         /* afex - poll to check if VIFSET_ACK should be sent to MFW */
5402         if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
5403                                &bp->sp_state)) {
5404                 bnx2x_link_report(bp);
5405                 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
5406         }
5407 }
5408
5409 irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
5410 {
5411         struct net_device *dev = dev_instance;
5412         struct bnx2x *bp = netdev_priv(dev);
5413
5414         bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
5415                      IGU_INT_DISABLE, 0);
5416
5417 #ifdef BNX2X_STOP_ON_ERROR
5418         if (unlikely(bp->panic))
5419                 return IRQ_HANDLED;
5420 #endif
5421
5422         if (CNIC_LOADED(bp)) {
5423                 struct cnic_ops *c_ops;
5424
5425                 rcu_read_lock();
5426                 c_ops = rcu_dereference(bp->cnic_ops);
5427                 if (c_ops)
5428                         c_ops->cnic_handler(bp->cnic_data, NULL);
5429                 rcu_read_unlock();
5430         }
5431
5432         /* schedule sp task to perform default status block work, ack
5433          * attentions and enable interrupts.
5434          */
5435         bnx2x_schedule_sp_task(bp);
5436
5437         return IRQ_HANDLED;
5438 }
5439
5440 /* end of slow path */
5441
5442 void bnx2x_drv_pulse(struct bnx2x *bp)
5443 {
5444         SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
5445                  bp->fw_drv_pulse_wr_seq);
5446 }
5447
5448 static void bnx2x_timer(unsigned long data)
5449 {
5450         struct bnx2x *bp = (struct bnx2x *) data;
5451
5452         if (!netif_running(bp->dev))
5453                 return;
5454
5455         if (IS_PF(bp) &&
5456             !BP_NOMCP(bp)) {
5457                 int mb_idx = BP_FW_MB_IDX(bp);
5458                 u16 drv_pulse;
5459                 u16 mcp_pulse;
5460
5461                 ++bp->fw_drv_pulse_wr_seq;
5462                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5463                 drv_pulse = bp->fw_drv_pulse_wr_seq;
5464                 bnx2x_drv_pulse(bp);
5465
5466                 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
5467                              MCP_PULSE_SEQ_MASK);
5468                 /* The delta between driver pulse and mcp response
5469                  * should not get too big. If the MFW is more than 5 pulses
5470                  * behind, we should worry about it enough to generate an error
5471                  * log.
5472                  */
5473                 if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5)
5474                         BNX2X_ERR("MFW seems hung: drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5475                                   drv_pulse, mcp_pulse);
5476         }
5477
5478         if (bp->state == BNX2X_STATE_OPEN)
5479                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
5480
5481         /* sample the PF-to-VF bulletin board for new posts from the PF */
5482         if (IS_VF(bp))
5483                 bnx2x_timer_sriov(bp);
5484
5485         mod_timer(&bp->timer, jiffies + bp->current_interval);
5486 }
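
/* Illustrative arithmetic for the pulse check above, assuming the usual
 * 15-bit sequence mask (MCP_PULSE_SEQ_MASK == 0x7fff); the masked
 * subtraction stays meaningful across wrap-around:
 *
 *	drv_pulse = 0x0002, mcp_pulse = 0x7ffe
 *	(0x0002 - 0x7ffe) & 0x7fff == 0x0004
 *
 * i.e. the MFW is considered 4 pulses behind, just under the error
 * threshold of 5.
 */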
5487
5488 /* end of Statistics */
5489
5490 /* nic init */
5491
5492 /*
5493  * nic init service functions
5494  */
5495
5496 static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
5497 {
5498         u32 i;
5499         if (!(len % 4) && !(addr % 4))
5500                 for (i = 0; i < len; i += 4)
5501                         REG_WR(bp, addr + i, fill);
5502         else
5503                 for (i = 0; i < len; i++)
5504                         REG_WR8(bp, addr + i, fill);
5505 }
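
/* Example for the two fill paths above (illustrative only): a 4-byte
 * aligned region whose length is a multiple of 4 goes through the dword
 * path, e.g.
 *
 *	bnx2x_fill(bp, addr, 0, 16);	(four REG_WR() calls)
 *
 * while an unaligned address or a non-multiple-of-4 length falls back
 * to byte-wide REG_WR8() accesses.
 */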
5506
5507 /* helper: writes FP SP data to FW - data_size in dwords */
5508 static void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
5509                                 int fw_sb_id,
5510                                 u32 *sb_data_p,
5511                                 u32 data_size)
5512 {
5513         int index;
5514         for (index = 0; index < data_size; index++)
5515                 REG_WR(bp, BAR_CSTRORM_INTMEM +
5516                         CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
5517                         sizeof(u32)*index,
5518                         *(sb_data_p + index));
5519 }
5520
5521 static void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
5522 {
5523         u32 *sb_data_p;
5524         u32 data_size = 0;
5525         struct hc_status_block_data_e2 sb_data_e2;
5526         struct hc_status_block_data_e1x sb_data_e1x;
5527
5528         /* disable the function first */
5529         if (!CHIP_IS_E1x(bp)) {
5530                 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
5531                 sb_data_e2.common.state = SB_DISABLED;
5532                 sb_data_e2.common.p_func.vf_valid = false;
5533                 sb_data_p = (u32 *)&sb_data_e2;
5534                 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
5535         } else {
5536                 memset(&sb_data_e1x, 0,
5537                        sizeof(struct hc_status_block_data_e1x));
5538                 sb_data_e1x.common.state = SB_DISABLED;
5539                 sb_data_e1x.common.p_func.vf_valid = false;
5540                 sb_data_p = (u32 *)&sb_data_e1x;
5541                 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
5542         }
5543         bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
5544
5545         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5546                         CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
5547                         CSTORM_STATUS_BLOCK_SIZE);
5548         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5549                         CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
5550                         CSTORM_SYNC_BLOCK_SIZE);
5551 }
5552
5553 /* helper:  writes SP SB data to FW */
5554 static void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
5555                 struct hc_sp_status_block_data *sp_sb_data)
5556 {
5557         int func = BP_FUNC(bp);
5558         int i;
5559         for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
5560                 REG_WR(bp, BAR_CSTRORM_INTMEM +
5561                         CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
5562                         i*sizeof(u32),
5563                         *((u32 *)sp_sb_data + i));
5564 }
5565
5566 static void bnx2x_zero_sp_sb(struct bnx2x *bp)
5567 {
5568         int func = BP_FUNC(bp);
5569         struct hc_sp_status_block_data sp_sb_data;
5570         memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
5571
5572         sp_sb_data.state = SB_DISABLED;
5573         sp_sb_data.p_func.vf_valid = false;
5574
5575         bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
5576
5577         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5578                         CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
5579                         CSTORM_SP_STATUS_BLOCK_SIZE);
5580         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5581                         CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
5582                         CSTORM_SP_SYNC_BLOCK_SIZE);
5583 }
5584
5585 static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
5586                                            int igu_sb_id, int igu_seg_id)
5587 {
5588         hc_sm->igu_sb_id = igu_sb_id;
5589         hc_sm->igu_seg_id = igu_seg_id;
5590         hc_sm->timer_value = 0xFF;
5591         hc_sm->time_to_expire = 0xFFFFFFFF;
5592 }
5593
5594 /* maps status block indices to state machine ids */
5595 static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
5596 {
5597         /* zero out state machine indices */
5598         /* rx indices */
5599         index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
5600
5601         /* tx indices */
5602         index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
5603         index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
5604         index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
5605         index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
5606
5607         /* map indices */
5608         /* rx indices */
5609         index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
5610                 SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5611
5612         /* tx indices */
5613         index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
5614                 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5615         index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
5616                 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5617         index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
5618                 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5619         index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
5620                 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5621 }
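
/* The clear-then-set pattern above amounts to, per index (sketch):
 *
 *	flags = (flags & ~HC_INDEX_DATA_SM_ID) |
 *		(sm_id << HC_INDEX_DATA_SM_ID_SHIFT);
 *
 * where sm_id is SM_RX_ID for the rx CQ index and SM_TX_ID for the tx
 * CQ indices, so every index is driven by exactly one state machine.
 */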
5622
5623 void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
5624                           u8 vf_valid, int fw_sb_id, int igu_sb_id)
5625 {
5626         int igu_seg_id;
5627
5628         struct hc_status_block_data_e2 sb_data_e2;
5629         struct hc_status_block_data_e1x sb_data_e1x;
5630         struct hc_status_block_sm  *hc_sm_p;
5631         int data_size;
5632         u32 *sb_data_p;
5633
5634         if (CHIP_INT_MODE_IS_BC(bp))
5635                 igu_seg_id = HC_SEG_ACCESS_NORM;
5636         else
5637                 igu_seg_id = IGU_SEG_ACCESS_NORM;
5638
5639         bnx2x_zero_fp_sb(bp, fw_sb_id);
5640
5641         if (!CHIP_IS_E1x(bp)) {
5642                 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
5643                 sb_data_e2.common.state = SB_ENABLED;
5644                 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
5645                 sb_data_e2.common.p_func.vf_id = vfid;
5646                 sb_data_e2.common.p_func.vf_valid = vf_valid;
5647                 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
5648                 sb_data_e2.common.same_igu_sb_1b = true;
5649                 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
5650                 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
5651                 hc_sm_p = sb_data_e2.common.state_machine;
5652                 sb_data_p = (u32 *)&sb_data_e2;
5653                 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
5654                 bnx2x_map_sb_state_machines(sb_data_e2.index_data);
5655         } else {
5656                 memset(&sb_data_e1x, 0,
5657                        sizeof(struct hc_status_block_data_e1x));
5658                 sb_data_e1x.common.state = SB_ENABLED;
5659                 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
5660                 sb_data_e1x.common.p_func.vf_id = 0xff;
5661                 sb_data_e1x.common.p_func.vf_valid = false;
5662                 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
5663                 sb_data_e1x.common.same_igu_sb_1b = true;
5664                 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
5665                 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
5666                 hc_sm_p = sb_data_e1x.common.state_machine;
5667                 sb_data_p = (u32 *)&sb_data_e1x;
5668                 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
5669                 bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
5670         }
5671
5672         bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
5673                                        igu_sb_id, igu_seg_id);
5674         bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
5675                                        igu_sb_id, igu_seg_id);
5676
5677         DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id);
5678
5679         /* write indices to HW - PCI guarantees endianness of regpairs */
5680         bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
5681 }
5682
5683 static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id,
5684                                      u16 tx_usec, u16 rx_usec)
5685 {
5686         bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS,
5687                                     false, rx_usec);
5688         bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
5689                                        HC_INDEX_ETH_TX_CQ_CONS_COS0, false,
5690                                        tx_usec);
5691         bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
5692                                        HC_INDEX_ETH_TX_CQ_CONS_COS1, false,
5693                                        tx_usec);
5694         bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
5695                                        HC_INDEX_ETH_TX_CQ_CONS_COS2, false,
5696                                        tx_usec);
5697 }
5698
5699 static void bnx2x_init_def_sb(struct bnx2x *bp)
5700 {
5701         struct host_sp_status_block *def_sb = bp->def_status_blk;
5702         dma_addr_t mapping = bp->def_status_blk_mapping;
5703         int igu_sp_sb_index;
5704         int igu_seg_id;
5705         int port = BP_PORT(bp);
5706         int func = BP_FUNC(bp);
5707         int reg_offset, reg_offset_en5;
5708         u64 section;
5709         int index;
5710         struct hc_sp_status_block_data sp_sb_data;
5711         memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
5712
5713         if (CHIP_INT_MODE_IS_BC(bp)) {
5714                 igu_sp_sb_index = DEF_SB_IGU_ID;
5715                 igu_seg_id = HC_SEG_ACCESS_DEF;
5716         } else {
5717                 igu_sp_sb_index = bp->igu_dsb_id;
5718                 igu_seg_id = IGU_SEG_ACCESS_DEF;
5719         }
5720
5721         /* ATTN */
5722         section = ((u64)mapping) + offsetof(struct host_sp_status_block,
5723                                             atten_status_block);
5724         def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
5725
5726         bp->attn_state = 0;
5727
5728         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5729                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5730         reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
5731                                  MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0);
5732         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
5733                 int sindex;
5734                 /* take care of sig[0]..sig[4] */
5735                 for (sindex = 0; sindex < 4; sindex++)
5736                         bp->attn_group[index].sig[sindex] =
5737                            REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
5738
5739                 if (!CHIP_IS_E1x(bp))
5740                         /*
5741                          * enable5 is separate from the rest of the registers,
5742                          * and therefore the address skip is 4
5743                          * and not 16 between the different groups
5744                          */
5745                         bp->attn_group[index].sig[4] = REG_RD(bp,
5746                                         reg_offset_en5 + 0x4*index);
5747                 else
5748                         bp->attn_group[index].sig[4] = 0;
5749         }
5750
5751         if (bp->common.int_block == INT_BLOCK_HC) {
5752                 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
5753                                      HC_REG_ATTN_MSG0_ADDR_L);
5754
5755                 REG_WR(bp, reg_offset, U64_LO(section));
5756                 REG_WR(bp, reg_offset + 4, U64_HI(section));
5757         } else if (!CHIP_IS_E1x(bp)) {
5758                 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
5759                 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
5760         }
5761
5762         section = ((u64)mapping) + offsetof(struct host_sp_status_block,
5763                                             sp_sb);
5764
5765         bnx2x_zero_sp_sb(bp);
5766
5767         /* PCI guarantees endianness of regpairs */
5768         sp_sb_data.state                = SB_ENABLED;
5769         sp_sb_data.host_sb_addr.lo      = U64_LO(section);
5770         sp_sb_data.host_sb_addr.hi      = U64_HI(section);
5771         sp_sb_data.igu_sb_id            = igu_sp_sb_index;
5772         sp_sb_data.igu_seg_id           = igu_seg_id;
5773         sp_sb_data.p_func.pf_id         = func;
5774         sp_sb_data.p_func.vnic_id       = BP_VN(bp);
5775         sp_sb_data.p_func.vf_id         = 0xff;
5776
5777         bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
5778
5779         bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
5780 }
5781
5782 void bnx2x_update_coalesce(struct bnx2x *bp)
5783 {
5784         int i;
5785
5786         for_each_eth_queue(bp, i)
5787                 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
5788                                          bp->tx_ticks, bp->rx_ticks);
5789 }
5790
5791 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5792 {
5793         spin_lock_init(&bp->spq_lock);
5794         atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
5795
5796         bp->spq_prod_idx = 0;
5797         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5798         bp->spq_prod_bd = bp->spq;
5799         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5800 }
5801
5802 static void bnx2x_init_eq_ring(struct bnx2x *bp)
5803 {
5804         int i;
5805         for (i = 1; i <= NUM_EQ_PAGES; i++) {
5806                 union event_ring_elem *elem =
5807                         &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
5808
5809                 elem->next_page.addr.hi =
5810                         cpu_to_le32(U64_HI(bp->eq_mapping +
5811                                    BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
5812                 elem->next_page.addr.lo =
5813                         cpu_to_le32(U64_LO(bp->eq_mapping +
5814                                    BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
5815         }
5816         bp->eq_cons = 0;
5817         bp->eq_prod = NUM_EQ_DESC;
5818         bp->eq_cons_sb = BNX2X_EQ_INDEX;
5819         /* reserve one entry of slack so we get a warning before the ring overflows */
5820         atomic_set(&bp->eq_spq_left,
5821                 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
5822 }
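
/* How the next_page chaining above lays out (illustrative): the last
 * element of page i-1 (for i = 1..NUM_EQ_PAGES) points at page
 * (i % NUM_EQ_PAGES), e.g. with two pages:
 *
 *	page 0, last elem -> eq_mapping + BCM_PAGE_SIZE   (page 1)
 *	page 1, last elem -> eq_mapping                   (back to page 0)
 *
 * so the event ring forms a circular chain of pages.
 */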
5823
5824 /* called with netif_addr_lock_bh() */
5825 int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
5826                         unsigned long rx_mode_flags,
5827                         unsigned long rx_accept_flags,
5828                         unsigned long tx_accept_flags,
5829                         unsigned long ramrod_flags)
5830 {
5831         struct bnx2x_rx_mode_ramrod_params ramrod_param;
5832         int rc;
5833
5834         memset(&ramrod_param, 0, sizeof(ramrod_param));
5835
5836         /* Prepare ramrod parameters */
5837         ramrod_param.cid = 0;
5838         ramrod_param.cl_id = cl_id;
5839         ramrod_param.rx_mode_obj = &bp->rx_mode_obj;
5840         ramrod_param.func_id = BP_FUNC(bp);
5841
5842         ramrod_param.pstate = &bp->sp_state;
5843         ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING;
5844
5845         ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata);
5846         ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata);
5847
5848         set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
5849
5850         ramrod_param.ramrod_flags = ramrod_flags;
5851         ramrod_param.rx_mode_flags = rx_mode_flags;
5852
5853         ramrod_param.rx_accept_flags = rx_accept_flags;
5854         ramrod_param.tx_accept_flags = tx_accept_flags;
5855
5856         rc = bnx2x_config_rx_mode(bp, &ramrod_param);
5857         if (rc < 0) {
5858                 BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
5859                 return rc;
5860         }
5861
5862         return 0;
5863 }
5864
5865 static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
5866                                    unsigned long *rx_accept_flags,
5867                                    unsigned long *tx_accept_flags)
5868 {
5869         /* Clear the flags first */
5870         *rx_accept_flags = 0;
5871         *tx_accept_flags = 0;
5872
5873         switch (rx_mode) {
5874         case BNX2X_RX_MODE_NONE:
5875                 /*
5876                  * 'drop all' supersedes any accept flags that may have been
5877                  * passed to the function.
5878                  */
5879                 break;
5880         case BNX2X_RX_MODE_NORMAL:
5881                 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
5882                 __set_bit(BNX2X_ACCEPT_MULTICAST, rx_accept_flags);
5883                 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
5884
5885                 /* internal switching mode */
5886                 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
5887                 __set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags);
5888                 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
5889
5890                 break;
5891         case BNX2X_RX_MODE_ALLMULTI:
5892                 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
5893                 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
5894                 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
5895
5896                 /* internal switching mode */
5897                 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
5898                 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
5899                 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
5900
5901                 break;
5902         case BNX2X_RX_MODE_PROMISC:
5903                 /* By definition of SI mode, an interface in promiscuous
5904                  * mode should receive both matched and unmatched (with
5905                  * respect to the port) unicast packets.
5906                  */
5907                 __set_bit(BNX2X_ACCEPT_UNMATCHED, rx_accept_flags);
5908                 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
5909                 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
5910                 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
5911
5912                 /* internal switching mode */
5913                 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
5914                 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
5915
5916                 if (IS_MF_SI(bp))
5917                         __set_bit(BNX2X_ACCEPT_ALL_UNICAST, tx_accept_flags);
5918                 else
5919                         __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
5920
5921                 break;
5922         default:
5923                 BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode);
5924                 return -EINVAL;
5925         }
5926
5927         /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
5928         if (bp->rx_mode != BNX2X_RX_MODE_NONE) {
5929                 __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
5930                 __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
5931         }
5932
5933         return 0;
5934 }
5935
5936 /* called with netif_addr_lock_bh() */
5937 int bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5938 {
5939         unsigned long rx_mode_flags = 0, ramrod_flags = 0;
5940         unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
5941         int rc;
5942
5943         if (!NO_FCOE(bp))
5944                 /* Configure rx_mode of FCoE Queue */
5945                 __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
5946
5947         rc = bnx2x_fill_accept_flags(bp, bp->rx_mode, &rx_accept_flags,
5948                                      &tx_accept_flags);
5949         if (rc)
5950                 return rc;
5951
5952         __set_bit(RAMROD_RX, &ramrod_flags);
5953         __set_bit(RAMROD_TX, &ramrod_flags);
5954
5955         return bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags,
5956                                    rx_accept_flags, tx_accept_flags,
5957                                    ramrod_flags);
5958 }
5959
5960 static void bnx2x_init_internal_common(struct bnx2x *bp)
5961 {
5962         int i;
5963
5964         if (IS_MF_SI(bp))
5965                 /*
5966                  * In switch independent mode, the TSTORM needs to accept
5967                  * packets that failed classification, since approximate match
5968                  * mac addresses aren't written to NIG LLH
5969                  */
5970                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5971                             TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
5972         else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */
5973                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5974                             TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0);
5975
5976         /* Zero this manually as its initialization is
5977            currently missing in the initTool */
5978         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5979                 REG_WR(bp, BAR_USTRORM_INTMEM +
5980                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
5981         if (!CHIP_IS_E1x(bp)) {
5982                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
5983                         CHIP_INT_MODE_IS_BC(bp) ?
5984                         HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
5985         }
5986 }
5987
5988 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5989 {
5990         switch (load_code) {
5991         case FW_MSG_CODE_DRV_LOAD_COMMON:
5992         case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5993                 bnx2x_init_internal_common(bp);
5994                 /* no break */
5995
5996         case FW_MSG_CODE_DRV_LOAD_PORT:
5997                 /* nothing to do */
5998                 /* no break */
5999
6000         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6001                 /* internal memory per function is
6002                    initialized inside bnx2x_pf_init */
6003                 break;
6004
6005         default:
6006                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6007                 break;
6008         }
6009 }
6010
6011 static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
6012 {
6013         return fp->bp->igu_base_sb + fp->index + CNIC_SUPPORT(fp->bp);
6014 }
6015
6016 static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
6017 {
6018         return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp);
6019 }
6020
6021 static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
6022 {
6023         if (CHIP_IS_E1x(fp->bp))
6024                 return BP_L_ID(fp->bp) + fp->index;
6025         else    /* We want Client ID to be the same as IGU SB ID for 57712 */
6026                 return bnx2x_fp_igu_sb_id(fp);
6027 }
6028
6029 static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
6030 {
6031         struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
6032         u8 cos;
6033         unsigned long q_type = 0;
6034         u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
6035         fp->rx_queue = fp_idx;
6036         fp->cid = fp_idx;
6037         fp->cl_id = bnx2x_fp_cl_id(fp);
6038         fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
6039         fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp);
6040         /* qZone id equals the FW (per-path) client id */
6041         fp->cl_qzone_id  = bnx2x_fp_qzone_id(fp);
6042
6043         /* init shortcut */
6044         fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);
6045
6046         /* Setup SB indices */
6047         fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
6048
6049         /* Configure Queue State object */
6050         __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
6051         __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
6052
6053         BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS);
6054
6055         /* init tx data */
6056         for_each_cos_in_tx_queue(fp, cos) {
6057                 bnx2x_init_txdata(bp, fp->txdata_ptr[cos],
6058                                   CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp),
6059                                   FP_COS_TO_TXQ(fp, cos, bp),
6060                                   BNX2X_TX_SB_INDEX_BASE + cos, fp);
6061                 cids[cos] = fp->txdata_ptr[cos]->cid;
6062         }
6063
6064         /* nothing more for vf to do here */
6065         if (IS_VF(bp))
6066                 return;
6067
6068         bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
6069                       fp->fw_sb_id, fp->igu_sb_id);
6070         bnx2x_update_fpsb_idx(fp);
6071         bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
6072                              fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
6073                              bnx2x_sp_mapping(bp, q_rdata), q_type);
6074
6075         /*
6076          * Configure classification DBs: always enable Tx switching
6077          */
6078         bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);
6079
6080         DP(NETIF_MSG_IFUP,
6081            "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  fw_sb %d  igu_sb %d\n",
6082            fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
6083            fp->igu_sb_id);
6084 }
6085
6086 static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
6087 {
6088         int i;
6089
6090         for (i = 1; i <= NUM_TX_RINGS; i++) {
6091                 struct eth_tx_next_bd *tx_next_bd =
6092                         &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
6093
6094                 tx_next_bd->addr_hi =
6095                         cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
6096                                     BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
6097                 tx_next_bd->addr_lo =
6098                         cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
6099                                     BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
6100         }
6101
6102         *txdata->tx_cons_sb = cpu_to_le16(0);
6103
6104         SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
6105         txdata->tx_db.data.zero_fill1 = 0;
6106         txdata->tx_db.data.prod = 0;
6107
6108         txdata->tx_pkt_prod = 0;
6109         txdata->tx_pkt_cons = 0;
6110         txdata->tx_bd_prod = 0;
6111         txdata->tx_bd_cons = 0;
6112         txdata->tx_pkt = 0;
6113 }
6114
6115 static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp)
6116 {
6117         int i;
6118
6119         for_each_tx_queue_cnic(bp, i)
6120                 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]);
6121 }
6122
6123 static void bnx2x_init_tx_rings(struct bnx2x *bp)
6124 {
6125         int i;
6126         u8 cos;
6127
6128         for_each_eth_queue(bp, i)
6129                 for_each_cos_in_tx_queue(&bp->fp[i], cos)
6130                         bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
6131 }
6132
6133 void bnx2x_nic_init_cnic(struct bnx2x *bp)
6134 {
6135         if (!NO_FCOE(bp))
6136                 bnx2x_init_fcoe_fp(bp);
6137
6138         bnx2x_init_sb(bp, bp->cnic_sb_mapping,
6139                       BNX2X_VF_ID_INVALID, false,
6140                       bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));
6141
6142         /* ensure status block indices were read */
6143         rmb();
6144         bnx2x_init_rx_rings_cnic(bp);
6145         bnx2x_init_tx_rings_cnic(bp);
6146
6147         /* flush all */
6148         mb();
6149         mmiowb();
6150 }
6151
6152 void bnx2x_pre_irq_nic_init(struct bnx2x *bp)
6153 {
6154         int i;
6155
6156         /* Setup NIC internals and enable interrupts */
6157         for_each_eth_queue(bp, i)
6158                 bnx2x_init_eth_fp(bp, i);
6159
6160         /* ensure status block indices were read */
6161         rmb();
6162         bnx2x_init_rx_rings(bp);
6163         bnx2x_init_tx_rings(bp);
6164
6165         if (IS_PF(bp)) {
6166                 /* Initialize MOD_ABS interrupts */
6167                 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
6168                                        bp->common.shmem_base,
6169                                        bp->common.shmem2_base, BP_PORT(bp));
6170
6171                 /* initialize the default status block and sp ring */
6172                 bnx2x_init_def_sb(bp);
6173                 bnx2x_update_dsb_idx(bp);
6174                 bnx2x_init_sp_ring(bp);
6175         } else {
6176                 bnx2x_memset_stats(bp);
6177         }
6178 }
6179
6180 void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code)
6181 {
6182         bnx2x_init_eq_ring(bp);
6183         bnx2x_init_internal(bp, load_code);
6184         bnx2x_pf_init(bp);
6185         bnx2x_stats_init(bp);
6186
6187         /* flush all before enabling interrupts */
6188         mb();
6189         mmiowb();
6190
6191         bnx2x_int_enable(bp);
6192
6193         /* Check for SPIO5 */
6194         bnx2x_attn_int_deasserted0(bp,
6195                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
6196                                    AEU_INPUTS_ATTN_BITS_SPIO5);
6197 }
6198
6199 /* gzip service functions */
6200 static int bnx2x_gunzip_init(struct bnx2x *bp)
6201 {
6202         bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
6203                                             &bp->gunzip_mapping, GFP_KERNEL);
6204         if (bp->gunzip_buf  == NULL)
6205                 goto gunzip_nomem1;
6206
6207         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
6208         if (bp->strm  == NULL)
6209                 goto gunzip_nomem2;
6210
6211         bp->strm->workspace = vmalloc(zlib_inflate_workspacesize());
6212         if (bp->strm->workspace == NULL)
6213                 goto gunzip_nomem3;
6214
6215         return 0;
6216
6217 gunzip_nomem3:
6218         kfree(bp->strm);
6219         bp->strm = NULL;
6220
6221 gunzip_nomem2:
6222         dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6223                           bp->gunzip_mapping);
6224         bp->gunzip_buf = NULL;
6225
6226 gunzip_nomem1:
6227         BNX2X_ERR("Cannot allocate firmware buffer for un-compression\n");
6228         return -ENOMEM;
6229 }
6230
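/* The error unwinding above follows the usual kernel goto-ladder idiom:
 * each label frees exactly what was allocated before the failing step,
 * in reverse order, so a failure at any point leaves no leaks.
 */
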
6231 static void bnx2x_gunzip_end(struct bnx2x *bp)
6232 {
6233         if (bp->strm) {
6234                 vfree(bp->strm->workspace);
6235                 kfree(bp->strm);
6236                 bp->strm = NULL;
6237         }
6238
6239         if (bp->gunzip_buf) {
6240                 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6241                                   bp->gunzip_mapping);
6242                 bp->gunzip_buf = NULL;
6243         }
6244 }
6245
6246 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
6247 {
6248         int n, rc;
6249
6250         /* check gzip header */
6251         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
6252                 BNX2X_ERR("Bad gzip header\n");
6253                 return -EINVAL;
6254         }
6255
6256         n = 10;
6257
6258 #define FNAME                           0x8
6259
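        /* skip over the NUL-terminated original file name, if present */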
6260         if (zbuf[3] & FNAME)
6261                 while ((zbuf[n++] != 0) && (n < len));
6262
6263         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
6264         bp->strm->avail_in = len - n;
6265         bp->strm->next_out = bp->gunzip_buf;
6266         bp->strm->avail_out = FW_BUF_SIZE;
6267
6268         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
6269         if (rc != Z_OK)
6270                 return rc;
6271
6272         rc = zlib_inflate(bp->strm, Z_FINISH);
6273         if ((rc != Z_OK) && (rc != Z_STREAM_END))
6274                 netdev_err(bp->dev, "Firmware decompression error: %s\n",
6275                            bp->strm->msg);
6276
6277         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
6278         if (bp->gunzip_outlen & 0x3)
6279                 netdev_err(bp->dev,
6280                            "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
6281                                 bp->gunzip_outlen);
6282         bp->gunzip_outlen >>= 2;
6283
6284         zlib_inflateEnd(bp->strm);
6285
6286         if (rc == Z_STREAM_END)
6287                 return 0;
6288
6289         return rc;
6290 }
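
/* For reference, the gzip framing handled above (per RFC 1952):
 *
 *	bytes 0-1: magic 0x1f 0x8b
 *	byte  2:   compression method, 0x08 (Z_DEFLATED)
 *	byte  3:   flags (FNAME = 0x08 means a NUL-terminated name follows)
 *	bytes 4-9: mtime, extra flags, OS
 *
 * hence inflation starts at offset 10 plus the optional file name, and
 * the negative window bits (-MAX_WBITS) tell zlib to expect a raw
 * deflate stream with no zlib header or trailing checksum.
 */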
6291
6292 /* nic load/unload */
6293
6294 /*
6295  * General service functions
6296  */
6297
6298 /* send a NIG loopback debug packet */
6299 static void bnx2x_lb_pckt(struct bnx2x *bp)
6300 {
6301         u32 wb_write[3];
6302
6303         /* Ethernet source and destination addresses */
6304         wb_write[0] = 0x55555555;
6305         wb_write[1] = 0x55555555;
6306         wb_write[2] = 0x20;             /* SOP */
6307         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6308
6309         /* NON-IP protocol */
6310         wb_write[0] = 0x09000000;
6311         wb_write[1] = 0x55555555;
6312         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
6313         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6314 }
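
/* Packet anatomy (illustrative): each REG_WR_DMAE() above pushes 8 bytes
 * of packet data plus a control word (0x20 = SOP, 0x10 = EOP), so the
 * debug packet is 16 bytes on the wire. That is why the memory test
 * below expects the NIG octet counter to advance by 0x10 per packet.
 */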
6315
6316 /* Some of the internal memories are not directly
6317  * readable from the driver; to test them
6318  * we send debug packets.
6319  */
6320 static int bnx2x_int_mem_test(struct bnx2x *bp)
6321 {
6322         int factor;
6323         int count, i;
6324         u32 val = 0;
6325
6326         if (CHIP_REV_IS_FPGA(bp))
6327                 factor = 120;
6328         else if (CHIP_REV_IS_EMUL(bp))
6329                 factor = 200;
6330         else
6331                 factor = 1;
6332
6333         /* Disable inputs of parser neighbor blocks */
6334         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6335         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6336         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6337         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6338
6339         /*  Write 0 to parser credits for CFC search request */
6340         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6341
6342         /* send Ethernet packet */
6343         bnx2x_lb_pckt(bp);
6344
6345         /* TODO: do I need to reset the NIG statistics? */
6346         /* Wait until NIG register shows 1 packet of size 0x10 */
6347         count = 1000 * factor;
6348         while (count) {
6349
6350                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6351                 val = *bnx2x_sp(bp, wb_data[0]);
6352                 if (val == 0x10)
6353                         break;
6354
6355                 usleep_range(10000, 20000);
6356                 count--;
6357         }
6358         if (val != 0x10) {
6359                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
6360                 return -1;
6361         }
6362
6363         /* Wait until PRS register shows 1 packet */
6364         count = 1000 * factor;
6365         while (count) {
6366                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6367                 if (val == 1)
6368                         break;
6369
6370                 usleep_range(10000, 20000);
6371                 count--;
6372         }
6373         if (val != 0x1) {
6374                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6375                 return -2;
6376         }
6377
6378         /* Reset and init BRB, PRS */
6379         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6380         msleep(50);
6381         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6382         msleep(50);
6383         bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6384         bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
6385
6386         DP(NETIF_MSG_HW, "part2\n");
6387
6388         /* Disable inputs of parser neighbor blocks */
6389         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6390         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6391         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6392         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6393
6394         /* Write 0 to parser credits for CFC search request */
6395         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6396
6397         /* send 10 Ethernet packets */
6398         for (i = 0; i < 10; i++)
6399                 bnx2x_lb_pckt(bp);
6400
6401         /* Wait until NIG register shows 10 + 1
6402            packets with a total size of 11*0x10 = 0xb0 */
6403         count = 1000 * factor;
6404         while (count) {
6405
6406                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6407                 val = *bnx2x_sp(bp, wb_data[0]);
6408                 if (val == 0xb0)
6409                         break;
6410
6411                 usleep_range(10000, 20000);
6412                 count--;
6413         }
6414         if (val != 0xb0) {
6415                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
6416                 return -3;
6417         }
6418
6419         /* Wait until PRS register shows 2 packets */
6420         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6421         if (val != 2)
6422                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
6423
6424         /* Write 1 to parser credits for CFC search request */
6425         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
6426
6427         /* Wait until PRS register shows 3 packets */
6428         msleep(10 * factor);
6429         /* check that the PRS register now shows 3 packets */
6430         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6431         if (val != 3)
6432                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
6433
6434         /* clear NIG EOP FIFO */
6435         for (i = 0; i < 11; i++)
6436                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
6437         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
6438         if (val != 1) {
6439                 BNX2X_ERR("clear of NIG failed\n");
6440                 return -4;
6441         }
6442
6443         /* Reset and init BRB, PRS, NIG */
6444         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6445         msleep(50);
6446         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6447         msleep(50);
6448         bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6449         bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
6450         if (!CNIC_SUPPORT(bp))
6451                 /* set NIC mode */
6452                 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6453
6454         /* Enable inputs of parser neighbor blocks */
6455         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6456         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6457         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
6458         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
6459
6460         DP(NETIF_MSG_HW, "done\n");
6461
6462         return 0; /* OK */
6463 }
6464
6465 static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
6466 {
6467         u32 val;
6468
6469         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6470         if (!CHIP_IS_E1x(bp))
6471                 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
6472         else
6473                 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
6474         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6475         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6476         /*
6477          * mask read length error interrupts in brb for parser
6478          * (parsing unit and 'checksum and crc' unit)
6479          * these errors are legal (PU reads fixed length and CAC can cause
6480          * read length error on truncated packets)
6481          */
6482         REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
6483         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6484         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6485         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6486         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6487         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
6488 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
6489 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
6490         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6491         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6492         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
6493 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
6494 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
6495         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6496         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6497         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6498         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
6499 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
6500 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
6501
6502         val = PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT  |
6503                 PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
6504                 PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN;
6505         if (!CHIP_IS_E1x(bp))
6506                 val |= PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
6507                         PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED;
6508         REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, val);
6509
6510         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6511         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6512         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
6513 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
6514
6515         if (!CHIP_IS_E1x(bp))
6516                 /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
6517                 REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
6518
6519         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6520         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
6521 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
6522         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bit 3,4 masked */
6523 }
6524
6525 static void bnx2x_reset_common(struct bnx2x *bp)
6526 {
6527         u32 val = 0x1400;
6528
6529         /* reset_common */
6530         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6531                0xd3ffff7f);
6532
6533         if (CHIP_IS_E3(bp)) {
6534                 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
6535                 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
6536         }
6537
6538         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val);
6539 }
6540
6541 static void bnx2x_setup_dmae(struct bnx2x *bp)
6542 {
6543         bp->dmae_ready = 0;
6544         spin_lock_init(&bp->dmae_lock);
6545 }
6546
6547 static void bnx2x_init_pxp(struct bnx2x *bp)
6548 {
6549         u16 devctl;
6550         int r_order, w_order;
6551
6552         pcie_capability_read_word(bp->pdev, PCI_EXP_DEVCTL, &devctl);
6553         DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6554         w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6555         if (bp->mrrs == -1)
6556                 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6557         else {
6558                 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6559                 r_order = bp->mrrs;
6560         }
6561
6562         bnx2x_init_pxp_arb(bp, r_order, w_order);
6563 }
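
/* Decoding sketch for the DevCtl fields above: Max_Payload_Size lives in
 * bits 7:5 and Max_Read_Request_Size in bits 14:12 of PCI_EXP_DEVCTL, so
 * the shifts extract the encoded orders, where order n means 128 << n
 * bytes:
 *
 *	w_order = (devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5;   0 -> 128B, 1 -> 256B
 *	r_order = (devctl & PCI_EXP_DEVCTL_READRQ) >> 12;   2 -> 512B, 5 -> 4096B
 */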
6564
6565 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6566 {
6567         int is_required;
6568         u32 val;
6569         int port;
6570
6571         if (BP_NOMCP(bp))
6572                 return;
6573
6574         is_required = 0;
6575         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6576               SHARED_HW_CFG_FAN_FAILURE_MASK;
6577
6578         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6579                 is_required = 1;
6580
6581         /*
6582          * The fan failure mechanism is usually related to the PHY type since
6583          * the power consumption of the board is affected by the PHY. Currently,
6584          * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6585          */
6586         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6587                 for (port = PORT_0; port < PORT_MAX; port++) {
6588                         is_required |=
6589                                 bnx2x_fan_failure_det_req(
6590                                         bp,
6591                                         bp->common.shmem_base,
6592                                         bp->common.shmem2_base,
6593                                         port);
6594                 }
6595
6596         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6597
6598         if (is_required == 0)
6599                 return;
6600
6601         /* Fan failure is indicated by SPIO 5 */
6602         bnx2x_set_spio(bp, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
6603
6604         /* set to active low mode */
6605         val = REG_RD(bp, MISC_REG_SPIO_INT);
6606         val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
6607         REG_WR(bp, MISC_REG_SPIO_INT, val);
6608
6609         /* enable interrupt to signal the IGU */
6610         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6611         val |= MISC_SPIO_SPIO5;
6612         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6613 }
6614
6615 void bnx2x_pf_disable(struct bnx2x *bp)
6616 {
6617         u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
6618         val &= ~IGU_PF_CONF_FUNC_EN;
6619
6620         REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
6621         REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
6622         REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
6623 }
6624
6625 static void bnx2x__common_init_phy(struct bnx2x *bp)
6626 {
6627         u32 shmem_base[2], shmem2_base[2];
6628         /* Avoid common init in case MFW supports LFA */
6629         if (SHMEM2_RD(bp, size) >
6630             (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
6631                 return;
6632         shmem_base[0] =  bp->common.shmem_base;
6633         shmem2_base[0] = bp->common.shmem2_base;
6634         if (!CHIP_IS_E1x(bp)) {
6635                 shmem_base[1] =
6636                         SHMEM2_RD(bp, other_shmem_base_addr);
6637                 shmem2_base[1] =
6638                         SHMEM2_RD(bp, other_shmem2_base_addr);
6639         }
6640         bnx2x_acquire_phy_lock(bp);
6641         bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
6642                               bp->common.chip_id);
6643         bnx2x_release_phy_lock(bp);
6644 }
6645
6646 /**
6647  * bnx2x_init_hw_common - initialize the HW at the COMMON phase.
6648  *
6649  * @bp:         driver handle
6650  */
6651 static int bnx2x_init_hw_common(struct bnx2x *bp)
6652 {
6653         u32 val;
6654
6655         DP(NETIF_MSG_HW, "starting common init  func %d\n", BP_ABS_FUNC(bp));
6656
6657         /*
6658          * take the RESET lock to protect undi_unload flow from accessing
6659          * registers while we're resetting the chip
6660          */
6661         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
6662
6663         bnx2x_reset_common(bp);
6664         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6665
6666         val = 0xfffc;
6667         if (CHIP_IS_E3(bp)) {
6668                 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
6669                 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
6670         }
6671         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);
6672
6673         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
6674
6675         bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
6676
6677         if (!CHIP_IS_E1x(bp)) {
6678                 u8 abs_func_id;
6679
6680                 /*
6681                  * In 4-port or 2-port mode we need to turn off master-enable
6682                  * for everyone; after that, turn it back on for self. So we
6683                  * disregard multi-function mode and always disable it for all
6684                  * functions on the given path, i.e. 0,2,4,6 for path 0 and
6685                  * 1,3,5,7 for path 1.
6686                  */
6687                 for (abs_func_id = BP_PATH(bp);
6688                      abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) {
6689                         if (abs_func_id == BP_ABS_FUNC(bp)) {
6690                                 REG_WR(bp,
6691                                     PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
6692                                     1);
6693                                 continue;
6694                         }
6695
6696                         bnx2x_pretend_func(bp, abs_func_id);
6697                         /* clear pf enable */
6698                         bnx2x_pf_disable(bp);
6699                         bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
6700                 }
6701         }
6702
6703         bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON);
6704         if (CHIP_IS_E1(bp)) {
6705                 /* enable HW interrupt from PXP on USDM overflow
6706                    bit 16 on INT_MASK_0 */
6707                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6708         }
6709
6710         bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
6711         bnx2x_init_pxp(bp);
6712
6713 #ifdef __BIG_ENDIAN
6714         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6715         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6716         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6717         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6718         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6719         /* make sure this value is 0 */
6720         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6721
6722 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6723         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6724         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6725         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6726         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6727 #endif
6728
6729         bnx2x_ilt_init_page_size(bp, INITOP_SET);
6730
6731         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6732                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6733
6734         /* let the HW do its magic ... */
6735         msleep(100);
6736         /* finish PXP init */
6737         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6738         if (val != 1) {
6739                 BNX2X_ERR("PXP2 CFG failed\n");
6740                 return -EBUSY;
6741         }
6742         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6743         if (val != 1) {
6744                 BNX2X_ERR("PXP2 RD_INIT failed\n");
6745                 return -EBUSY;
6746         }
6747
6748         /* Timers bug workaround, E2 only. We need to set the entire ILT to
6749          * have entries with value "0" and the valid bit on.
6750          * This needs to be done by the first PF that is loaded in a path
6751          * (i.e. common phase).
6752          */
6753         if (!CHIP_IS_E1x(bp)) {
6754 /* In E2 there is a bug in the timers block that can cause function 6 / 7
6755  * (i.e. vnic3) to start even if it is marked as "scan-off".
6756  * This occurs when a different function (func2,3) is being marked
6757  * as "scan-off". A real-life example: a driver being loaded and unloaded
6758  * while func6,7 are down. This will cause the timer to access the ilt,
6759  * translate to a logical address and send a request to read/write.
6760  * Since the ilt for the function that is down is not valid, this will cause
6761  * a translation error which is unrecoverable.
6762  * The workaround is intended to make sure that when this happens nothing
6763  * fatal will occur. The workaround:
6764  *      1.  First PF driver which loads on a path will:
6765  *              a.  After taking the chip out of reset, by using pretend,
6766  *                  it will write "0" to the following registers of
6767  *                  the other vnics.
6768  *                  REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
6769  *                  REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
6770  *                  REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
6771  *                  And for itself it will write '1' to
6772  *                  PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
6773  *                  dmae-operations (writing to pram for example.)
6774  *                  note: can be done for only function 6,7 but cleaner this
6775  *                        way.
6776  *              b.  Write zero+valid to the entire ILT.
6777  *              c.  Init the first_timers_ilt_entry, last_timers_ilt_entry of
6778  *                  VNIC3 (of that port). The range allocated will be the
6779  *                  entire ILT. This is needed to prevent an ILT range error.
6780  *      2.  Any PF driver load flow:
6781  *              a.  ILT update with the physical addresses of the allocated
6782  *                  logical pages.
6783  *              b.  Wait 20msec. - note that this timeout is needed to make
6784  *                  sure there are no requests in one of the PXP internal
6785  *                  queues with "old" ILT addresses.
6786  *              c.  PF enable in the PGLC.
6787  *              d.  Clear the was_error of the PF in the PGLC. (could have
6788  *                  occurred while driver was down)
6789  *              e.  PF enable in the CFC (WEAK + STRONG)
6790  *              f.  Timers scan enable
6791  *      3.  PF driver unload flow:
6792  *              a.  Clear the Timers scan_en.
6793  *              b.  Polling for scan_on=0 for that PF.
6794  *              c.  Clear the PF enable bit in the PXP.
6795  *              d.  Clear the PF enable in the CFC (WEAK + STRONG)
6796  *              e.  Write zero+valid to all ILT entries (The valid bit must
6797  *                  stay set)
6798  *              f.  If this is VNIC 3 of a port then also init
6799  *                  first_timers_ilt_entry to zero and last_timers_ilt_entry
6800  *                  to the last entry in the ILT.
6801  *
6802  *      Notes:
6803  *      Currently the PF error in the PGLC is non-recoverable.
6804  *      In the future there will be a recovery routine for this error.
6805  *      Currently attention is masked.
6806  *      Having an MCP lock on the load/unload process does not guarantee that
6807  *      there is no Timer disable during Func6/7 enable. This is because the
6808  *      Timers scan is currently being cleared by the MCP on FLR.
6809  *      Step 2.d can be done only for PF6/7 and the driver can also check if
6810  *      there is error before clearing it. But the flow above is simpler and
6811  *      more general.
6812  *      All ILT entries are written with zero+valid, not just the PF6/7
6813  *      ILT entries, since in the future the ILT entry allocation for
6814  *      PFs might be dynamic.
6815  */
6816                 struct ilt_client_info ilt_cli;
6817                 struct bnx2x_ilt ilt;
6818                 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
6819                 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
6820
6821                 /* initialize dummy TM client */
6822                 ilt_cli.start = 0;
6823                 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
6824                 ilt_cli.client_num = ILT_CLIENT_TM;
6825
6826                 /* Step 1: write zeroes to all ilt page entries with the valid bit on
6827                  * Step 2: set the timers first/last ilt entry to point
6828                  * to the entire range to prevent an ILT range error for the
6829                  * 3rd/4th vnic (this code assumes the vnic exists)
6830                  *
6831                  * both steps performed by call to bnx2x_ilt_client_init_op()
6832                  * with dummy TM client
6833                  *
6834                  * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
6835                  * and its sibling are split registers
6836                  */
6837                 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
6838                 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
6839                 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
6840
6841                 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
6842                 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
6843                 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
6844         }
6845
6846         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6847         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6848
6849         if (!CHIP_IS_E1x(bp)) {
6850                 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
6851                                 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
6852                 bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON);
6853
6854                 bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON);
6855
6856                 /* let the HW do its magic ... */
6857                 do {
6858                         msleep(200);
6859                         val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
6860                 } while (factor-- && (val != 1));
6861
6862                 if (val != 1) {
6863                         BNX2X_ERR("ATC_INIT failed\n");
6864                         return -EBUSY;
6865                 }
6866         }
6867
6868         bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON);
6869
6870         bnx2x_iov_init_dmae(bp);
6871
6872         /* clean the DMAE memory */
6873         bp->dmae_ready = 1;
6874         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1);
6875
6876         bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON);
6877
6878         bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON);
6879
6880         bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON);
6881
6882         bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON);
6883
6884         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6885         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6886         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6887         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6888
6889         bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON);
6890
6891         /* QM queues pointers table */
6892         bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
6893
6894         /* soft reset pulse */
6895         REG_WR(bp, QM_REG_SOFT_RESET, 1);
6896         REG_WR(bp, QM_REG_SOFT_RESET, 0);
6897
6898         if (CNIC_SUPPORT(bp))
6899                 bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
6900
6901         bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
6902
6903         if (!CHIP_REV_IS_SLOW(bp))
6904                 /* enable hw interrupt from doorbell Q */
6905                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6906
6907         bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6908
6909         bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
6910         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6911
6912         if (!CHIP_IS_E1(bp))
6913                 REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan);
6914
6915         if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) {
6916                 if (IS_MF_AFEX(bp)) {
6917                         /* configure so that VNTag and VLAN headers must be
6918                          * received in afex mode
6919                          */
6920                         REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 0xE);
6921                         REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, 0xA);
6922                         REG_WR(bp, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
6923                         REG_WR(bp, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
6924                         REG_WR(bp, PRS_REG_TAG_LEN_0, 0x4);
6925                 } else {
6926                         /* Bit-map indicating which L2 hdrs may appear
6927                          * after the basic Ethernet header
6928                          */
6929                         REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
6930                                bp->path_has_ovlan ? 7 : 6);
6931                 }
6932         }
6933
6934         bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON);
6935         bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON);
6936         bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON);
6937         bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON);
6938
6939         if (!CHIP_IS_E1x(bp)) {
6940                 /* reset VFC memories */
6941                 REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
6942                            VFC_MEMORIES_RST_REG_CAM_RST |
6943                            VFC_MEMORIES_RST_REG_RAM_RST);
6944                 REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
6945                            VFC_MEMORIES_RST_REG_CAM_RST |
6946                            VFC_MEMORIES_RST_REG_RAM_RST);
6947
6948                 msleep(20);
6949         }
6950
6951         bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON);
6952         bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON);
6953         bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON);
6954         bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON);
6955
6956         /* sync semi rtc */
6957         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6958                0x80000000);
6959         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6960                0x80000000);
6961
6962         bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON);
6963         bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON);
6964         bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON);
6965
6966         if (!CHIP_IS_E1x(bp)) {
6967                 if (IS_MF_AFEX(bp)) {
6968                         /* configure so that VNTag and VLAN headers must be
6969                          * sent in afex mode
6970                          */
6971                         REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 0xE);
6972                         REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, 0xA);
6973                         REG_WR(bp, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
6974                         REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
6975                         REG_WR(bp, PBF_REG_TAG_LEN_0, 0x4);
6976                 } else {
6977                         REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
6978                                bp->path_has_ovlan ? 7 : 6);
6979                 }
6980         }
6981
6982         REG_WR(bp, SRC_REG_SOFT_RST, 1);
6983
6984         bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON);
6985
6986         if (CNIC_SUPPORT(bp)) {
6987                 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6988                 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6989                 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6990                 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6991                 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6992                 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6993                 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6994                 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6995                 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6996                 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6997         }
6998         REG_WR(bp, SRC_REG_SOFT_RST, 0);
6999
7000         if (sizeof(union cdu_context) != 1024)
7001                 /* we currently assume that a context is 1024 bytes */
7002                 dev_alert(&bp->pdev->dev,
7003                           "please adjust the size of cdu_context(%ld)\n",
7004                           (long)sizeof(union cdu_context));
7005
7006         bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON);
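        /* the 1024 here corresponds to the 1024-byte context size
         * validated above */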
7007         val = (4 << 24) + (0 << 12) + 1024;
7008         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
7009
7010         bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON);
7011         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
7012         /* enable context validation interrupt from CFC */
7013         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
7014
7015         /* set the thresholds to prevent CFC/CDU race */
7016         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
7017
7018         bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON);
7019
7020         if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp))
7021                 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
7022
7023         bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON);
7024         bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON);
7025
7026         /* Reset PCIE errors for debug */
7027         REG_WR(bp, 0x2814, 0xffffffff);
7028         REG_WR(bp, 0x3820, 0xffffffff);
7029
7030         if (!CHIP_IS_E1x(bp)) {
7031                 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
7032                            (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
7033                                 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
7034                 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
7035                            (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
7036                                 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
7037                                 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
7038                 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
7039                            (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
7040                                 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
7041                                 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
7042         }
7043
7044         bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON);
7045         if (!CHIP_IS_E1(bp)) {
7046                 /* in E3 this is done in the per-port section */
7047                 if (!CHIP_IS_E3(bp))
7048                         REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
7049         }
7050         if (CHIP_IS_E1H(bp))
7051                 /* not applicable for E2 (and above ...) */
7052                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
7053
7054         if (CHIP_REV_IS_SLOW(bp))
7055                 msleep(200);
7056
7057         /* finish CFC init */
7058         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
7059         if (val != 1) {
7060                 BNX2X_ERR("CFC LL_INIT failed\n");
7061                 return -EBUSY;
7062         }
7063         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
7064         if (val != 1) {
7065                 BNX2X_ERR("CFC AC_INIT failed\n");
7066                 return -EBUSY;
7067         }
7068         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
7069         if (val != 1) {
7070                 BNX2X_ERR("CFC CAM_INIT failed\n");
7071                 return -EBUSY;
7072         }
7073         REG_WR(bp, CFC_REG_DEBUG0, 0);
7074
7075         if (CHIP_IS_E1(bp)) {
7076                 /* read NIG statistic
7077                    to see if this is our first up since powerup */
7078                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
7079                 val = *bnx2x_sp(bp, wb_data[0]);
7080
7081                 /* do internal memory self test */
7082                 if ((val == 0) && bnx2x_int_mem_test(bp)) {
7083                         BNX2X_ERR("internal mem self test failed\n");
7084                         return -EBUSY;
7085                 }
7086         }
7087
7088         bnx2x_setup_fan_failure_detection(bp);
7089
7090         /* clear PXP2 attentions */
7091         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
7092
7093         bnx2x_enable_blocks_attention(bp);
7094         bnx2x_enable_blocks_parity(bp);
7095
7096         if (!BP_NOMCP(bp)) {
7097                 if (CHIP_IS_E1x(bp))
7098                         bnx2x__common_init_phy(bp);
7099         } else
7100                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
7101
7102         return 0;
7103 }
7104
7105 /**
7106  * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase.
7107  *
7108  * @bp:         driver handle
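 *
 * Return: 0 on success, negative value on failure.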
7109  */
7110 static int bnx2x_init_hw_common_chip(struct bnx2x *bp)
7111 {
7112         int rc = bnx2x_init_hw_common(bp);
7113
7114         if (rc)
7115                 return rc;
7116
7117         /* In E2 2-PORT mode, same ext phy is used for the two paths */
7118         if (!BP_NOMCP(bp))
7119                 bnx2x__common_init_phy(bp);
7120
7121         return 0;
7122 }
7123
7124 static int bnx2x_init_hw_port(struct bnx2x *bp)
7125 {
7126         int port = BP_PORT(bp);
7127         int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
7128         u32 low, high;
7129         u32 val;
7130
7131         DP(NETIF_MSG_HW, "starting port init  port %d\n", port);
7132
7133         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7134
7135         bnx2x_init_block(bp, BLOCK_MISC, init_phase);
7136         bnx2x_init_block(bp, BLOCK_PXP, init_phase);
7137         bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
7138
7139         /* Timers bug workaround: the pf_master bit in pglue is disabled at
7140          * the common phase, so we need to enable it here before any dmae
7141          * access is attempted. Therefore we manually added the enable-master
7142          * to the port phase (it also happens in the function phase)
7143          */
7144         if (!CHIP_IS_E1x(bp))
7145                 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
7146
7147         bnx2x_init_block(bp, BLOCK_ATC, init_phase);
7148         bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
7149         bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
7150         bnx2x_init_block(bp, BLOCK_QM, init_phase);
7151
7152         bnx2x_init_block(bp, BLOCK_TCM, init_phase);
7153         bnx2x_init_block(bp, BLOCK_UCM, init_phase);
7154         bnx2x_init_block(bp, BLOCK_CCM, init_phase);
7155         bnx2x_init_block(bp, BLOCK_XCM, init_phase);
7156
7157         /* QM cid (connection) count */
7158         bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
7159
7160         if (CNIC_SUPPORT(bp)) {
7161                 bnx2x_init_block(bp, BLOCK_TM, init_phase);
7162                 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
7163                 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
7164         }
7165
7166         bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
7167
7168         bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
7169
7170         if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
7171
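                /* BRB pause thresholds below are in 256-byte units (e.g.
                 * high = low + 56 is low + 14KB) */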
7172                 if (IS_MF(bp))
7173                         low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
7174                 else if (bp->dev->mtu > 4096) {
7175                         if (bp->flags & ONE_PORT_FLAG)
7176                                 low = 160;
7177                         else {
7178                                 val = bp->dev->mtu;
7179                                 /* (24*1024 + val*4)/256 */
7180                                 low = 96 + (val/64) +
7181                                                 ((val % 64) ? 1 : 0);
7182                         }
7183                 } else
7184                         low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
7185                 high = low + 56;        /* 14*1024/256 */
7186                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
7187                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
7188         }
7189
7190         if (CHIP_MODE_IS_4_PORT(bp))
7191                 REG_WR(bp, (BP_PORT(bp) ?
7192                             BRB1_REG_MAC_GUARANTIED_1 :
7193                             BRB1_REG_MAC_GUARANTIED_0), 40);
7194
7195         bnx2x_init_block(bp, BLOCK_PRS, init_phase);
7196         if (CHIP_IS_E3B0(bp)) {
7197                 if (IS_MF_AFEX(bp)) {
7198                         /* configure headers for AFEX mode */
7199                         REG_WR(bp, BP_PORT(bp) ?
7200                                PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
7201                                PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
7202                         REG_WR(bp, BP_PORT(bp) ?
7203                                PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
7204                                PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
7205                         REG_WR(bp, BP_PORT(bp) ?
7206                                PRS_REG_MUST_HAVE_HDRS_PORT_1 :
7207                                PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
7208                 } else {
7209                         /* Ovlan exists only if we are in multi-function +
7210                          * switch-dependent mode; in switch-independent mode
7211                          * there are no ovlan headers
7212                          */
7213                         REG_WR(bp, BP_PORT(bp) ?
7214                                PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
7215                                PRS_REG_HDRS_AFTER_BASIC_PORT_0,
7216                                (bp->path_has_ovlan ? 7 : 6));
7217                 }
7218         }
7219
7220         bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
7221         bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
7222         bnx2x_init_block(bp, BLOCK_USDM, init_phase);
7223         bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
7224
7225         bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
7226         bnx2x_init_block(bp, BLOCK_USEM, init_phase);
7227         bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
7228         bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
7229
7230         bnx2x_init_block(bp, BLOCK_UPB, init_phase);
7231         bnx2x_init_block(bp, BLOCK_XPB, init_phase);
7232
7233         bnx2x_init_block(bp, BLOCK_PBF, init_phase);
7234
7235         if (CHIP_IS_E1x(bp)) {
7236                 /* configure PBF to work without PAUSE mtu 9000 */
7237                 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
7238
7239                 /* update threshold */
7240                 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
7241                 /* update init credit */
7242                 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
7243
7244                 /* probe changes */
7245                 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
7246                 udelay(50);
7247                 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
7248         }
7249
7250         if (CNIC_SUPPORT(bp))
7251                 bnx2x_init_block(bp, BLOCK_SRC, init_phase);
7252
7253         bnx2x_init_block(bp, BLOCK_CDU, init_phase);
7254         bnx2x_init_block(bp, BLOCK_CFC, init_phase);
7255
7256         if (CHIP_IS_E1(bp)) {
7257                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7258                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7259         }
7260         bnx2x_init_block(bp, BLOCK_HC, init_phase);
7261
7262         bnx2x_init_block(bp, BLOCK_IGU, init_phase);
7263
7264         bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
7265         /* init aeu_mask_attn_func_0/1:
7266          *  - SF mode: bits 3-7 are masked. Only bits 0-2 are in use
7267          *  - MF mode: bit 3 is masked. Bits 0-2 are in use as in SF;
7268          *             bits 4-7 are used for "per vn group attention" */
7269         val = IS_MF(bp) ? 0xF7 : 0x7;
7270         /* Enable DCBX attention for all but E1 */
7271         val |= CHIP_IS_E1(bp) ? 0 : 0x10;
7272         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
7273
7274         bnx2x_init_block(bp, BLOCK_NIG, init_phase);
7275
7276         if (!CHIP_IS_E1x(bp)) {
7277                 /* Bit-map indicating which L2 hdrs may appear after the
7278                  * basic Ethernet header
7279                  */
7280                 if (IS_MF_AFEX(bp))
7281                         REG_WR(bp, BP_PORT(bp) ?
7282                                NIG_REG_P1_HDRS_AFTER_BASIC :
7283                                NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
7284                 else
7285                         REG_WR(bp, BP_PORT(bp) ?
7286                                NIG_REG_P1_HDRS_AFTER_BASIC :
7287                                NIG_REG_P0_HDRS_AFTER_BASIC,
7288                                IS_MF_SD(bp) ? 7 : 6);
7289
7290                 if (CHIP_IS_E3(bp))
7291                         REG_WR(bp, BP_PORT(bp) ?
7292                                    NIG_REG_LLH1_MF_MODE :
7293                                    NIG_REG_LLH_MF_MODE, IS_MF(bp));
7294         }
7295         if (!CHIP_IS_E3(bp))
7296                 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
7297
7298         if (!CHIP_IS_E1(bp)) {
7299                 /* 0x2 disable mf_ov, 0x1 enable */
7300                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
7301                        (IS_MF_SD(bp) ? 0x1 : 0x2));
7302
7303                 if (!CHIP_IS_E1x(bp)) {
7304                         val = 0;
7305                         switch (bp->mf_mode) {
7306                         case MULTI_FUNCTION_SD:
7307                                 val = 1;
7308                                 break;
7309                         case MULTI_FUNCTION_SI:
7310                         case MULTI_FUNCTION_AFEX:
7311                                 val = 2;
7312                                 break;
7313                         }
7314
7315                         REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
7316                                                   NIG_REG_LLH0_CLS_TYPE), val);
7317                 }
7318                 {
7319                         REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
7320                         REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
7321                         REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
7322                 }
7323         }
7324
7325         /* If SPIO5 is set to generate interrupts, enable it for this port */
7326         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
7327         if (val & MISC_SPIO_SPIO5) {
7328                 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
7329                                        MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
7330                 val = REG_RD(bp, reg_addr);
7331                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
7332                 REG_WR(bp, reg_addr, val);
7333         }
7334
7335         return 0;
7336 }
7337
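/* Write a single ILT entry: the 64-bit mapping is split into two 32-bit
 * words and written to the on-chip address table as one wide-bus (DMAE)
 * access.
 */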
7338 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
7339 {
7340         int reg;
7341         u32 wb_write[2];
7342
7343         if (CHIP_IS_E1(bp))
7344                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
7345         else
7346                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
7347
7348         wb_write[0] = ONCHIP_ADDR1(addr);
7349         wb_write[1] = ONCHIP_ADDR2(addr);
7350         REG_WR_DMAE(bp, reg, wb_write, 2);
7351 }
7352
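/* Issue a cleanup command for the given status block through the IGU
 * command interface (via GRC), then poll the cstorm ack register until
 * the cleanup bit for that SB is set or the retry count expires.
 */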
7353 void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf)
7354 {
7355         u32 data, ctl, cnt = 100;
7356         u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
7357         u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
7358         u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
7359         u32 sb_bit =  1 << (idu_sb_id%32);
7360         u32 func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
7361         u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
7362
7363         /* Not supported in BC mode */
7364         if (CHIP_INT_MODE_IS_BC(bp))
7365                 return;
7366
7367         data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
7368                         << IGU_REGULAR_CLEANUP_TYPE_SHIFT)      |
7369                 IGU_REGULAR_CLEANUP_SET                         |
7370                 IGU_REGULAR_BCLEANUP;
7371
7372         ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT         |
7373               func_encode << IGU_CTRL_REG_FID_SHIFT             |
7374               IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
7375
7376         DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
7377                          data, igu_addr_data);
7378         REG_WR(bp, igu_addr_data, data);
7379         mmiowb();
7380         barrier();
7381         DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
7382                           ctl, igu_addr_ctl);
7383         REG_WR(bp, igu_addr_ctl, ctl);
7384         mmiowb();
7385         barrier();
7386
7387         /* wait for clean up to finish */
7388         while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
7389                 msleep(20);
7390
7391         if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
7392                 DP(NETIF_MSG_HW,
7393                    "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n",
7394                           idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
7395         }
7396 }
7397
7398 static void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
7399 {
7400         bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/);
7401 }
7402
7403 static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
7404 {
7405         u32 i, base = FUNC_ILT_BASE(func);
7406         for (i = base; i < base + ILT_PER_FUNC; i++)
7407                 bnx2x_ilt_wr(bp, i, 0);
7408 }
7409
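/* Program the searcher (SRC block): load the T2 table and set the number
 * of T1 hash bits for this port.
 */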
7410 static void bnx2x_init_searcher(struct bnx2x *bp)
7411 {
7412         int port = BP_PORT(bp);
7413         bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
7414         /* the T1 hash bits value determines the number of T1 entries */
7415         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
7416 }
7417
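/* Send a SWITCH_UPDATE ramrod that suspends or resumes Tx switching for
 * this function, and wait for its completion.
 */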
7418 static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend)
7419 {
7420         int rc;
7421         struct bnx2x_func_state_params func_params = {NULL};
7422         struct bnx2x_func_switch_update_params *switch_update_params =
7423                 &func_params.params.switch_update;
7424
7425         /* Prepare parameters for function state transitions */
7426         __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
7427         __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
7428
7429         func_params.f_obj = &bp->func_obj;
7430         func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
7431
7432         /* Function parameters */
7433         switch_update_params->suspend = suspend;
7434
7435         rc = bnx2x_func_state_change(bp, &func_params);
7436
7437         return rc;
7438 }
7439
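/* Take the chip out of NIC mode: close input from the network and the
 * BMC, suspend Tx switching so the parser drains, clear PRS_REG_NIC_MODE,
 * then reopen traffic and resume Tx switching.
 */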
7440 static int bnx2x_reset_nic_mode(struct bnx2x *bp)
7441 {
7442         int rc, i, port = BP_PORT(bp);
7443         int vlan_en = 0, mac_en[NUM_MACS];
7444
7445         /* Close input from network */
7446         if (bp->mf_mode == SINGLE_FUNCTION) {
7447                 bnx2x_set_rx_filter(&bp->link_params, 0);
7448         } else {
7449                 vlan_en = REG_RD(bp, port ? NIG_REG_LLH1_FUNC_EN :
7450                                    NIG_REG_LLH0_FUNC_EN);
7451                 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
7452                           NIG_REG_LLH0_FUNC_EN, 0);
7453                 for (i = 0; i < NUM_MACS; i++) {
7454                         mac_en[i] = REG_RD(bp, port ?
7455                                              (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7456                                               4 * i) :
7457                                              (NIG_REG_LLH0_FUNC_MEM_ENABLE +
7458                                               4 * i));
7459                         REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7460                                               4 * i) :
7461                                   (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i), 0);
7462                 }
7463         }
7464
7465         /* Close BMC to host */
7466         REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
7467                NIG_REG_P1_TX_MNG_HOST_ENABLE, 0);
7468
7469         /* Suspend Tx switching to the PF. Completion of this ramrod
7470          * further guarantees that all the packets of that PF / child
7471          * VFs in BRB were processed by the Parser, so it is safe to
7472          * change the NIC_MODE register.
7473          */
7474         rc = bnx2x_func_switch_update(bp, 1);
7475         if (rc) {
7476                 BNX2X_ERR("Can't suspend tx-switching!\n");
7477                 return rc;
7478         }
7479
7480         /* Change NIC_MODE register */
7481         REG_WR(bp, PRS_REG_NIC_MODE, 0);
7482
7483         /* Open input from network */
7484         if (bp->mf_mode == SINGLE_FUNCTION) {
7485                 bnx2x_set_rx_filter(&bp->link_params, 1);
7486         } else {
7487                 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
7488                           NIG_REG_LLH0_FUNC_EN, vlan_en);
7489                 for (i = 0; i < NUM_MACS; i++) {
7490                         REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7491                                               4 * i) :
7492                                   (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i),
7493                                   mac_en[i]);
7494                 }
7495         }
7496
7497         /* Enable BMC to host */
7498         REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
7499                NIG_REG_P1_TX_MNG_HOST_ENABLE, 1);
7500
7501         /* Resume Tx switching to the PF */
7502         rc = bnx2x_func_switch_update(bp, 0);
7503         if (rc) {
7504                 BNX2X_ERR("Can't resume tx-switching!\n");
7505                 return rc;
7506         }
7507
7508         DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
7509         return 0;
7510 }
7511
7512 int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
7513 {
7514         int rc;
7515
7516         bnx2x_ilt_init_op_cnic(bp, INITOP_SET);
7517
7518         if (CONFIGURE_NIC_MODE(bp)) {
7519                 /* Configure searcher as part of function hw init */
7520                 bnx2x_init_searcher(bp);
7521
7522                 /* Reset NIC mode */
7523                 rc = bnx2x_reset_nic_mode(bp);
7524                 if (rc)
7525                         BNX2X_ERR("Can't change NIC mode!\n");
7526                 return rc;
7527         }
7528
7529         return 0;
7530 }
7531
7532 static int bnx2x_init_hw_func(struct bnx2x *bp)
7533 {
7534         int port = BP_PORT(bp);
7535         int func = BP_FUNC(bp);
7536         int init_phase = PHASE_PF0 + func;
7537         struct bnx2x_ilt *ilt = BP_ILT(bp);
7538         u16 cdu_ilt_start;
7539         u32 addr, val;
7540         u32 main_mem_base, main_mem_size, main_mem_prty_clr;
7541         int i, main_mem_width, rc;
7542
7543         DP(NETIF_MSG_HW, "starting func init  func %d\n", func);
7544
7545         /* FLR cleanup */
7546         if (!CHIP_IS_E1x(bp)) {
7547                 rc = bnx2x_pf_flr_clnup(bp);
7548                 if (rc) {
7549                         bnx2x_fw_dump(bp);
7550                         return rc;
7551                 }
7552         }
7553
7554         /* set MSI reconfigure capability */
7555         if (bp->common.int_block == INT_BLOCK_HC) {
7556                 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
7557                 val = REG_RD(bp, addr);
7558                 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
7559                 REG_WR(bp, addr, val);
7560         }
7561
7562         bnx2x_init_block(bp, BLOCK_PXP, init_phase);
7563         bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
7564
7565         ilt = BP_ILT(bp);
7566         cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
7567
7568         if (IS_SRIOV(bp))
7569                 cdu_ilt_start += BNX2X_FIRST_VF_CID/ILT_PAGE_CIDS;
7570         cdu_ilt_start = bnx2x_iov_init_ilt(bp, cdu_ilt_start);
7571
7572         /* since BNX2X_FIRST_VF_CID > 0 the PF L2 cids precede
7573          * those of the VFs, so the start line should be reset
7574          */
7575         cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
7576         for (i = 0; i < L2_ILT_LINES(bp); i++) {
7577                 ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt;
7578                 ilt->lines[cdu_ilt_start + i].page_mapping =
7579                         bp->context[i].cxt_mapping;
7580                 ilt->lines[cdu_ilt_start + i].size = bp->context[i].size;
7581         }
7582
7583         bnx2x_ilt_init_op(bp, INITOP_SET);
7584
7585         if (!CONFIGURE_NIC_MODE(bp)) {
7586                 bnx2x_init_searcher(bp);
7587                 REG_WR(bp, PRS_REG_NIC_MODE, 0);
7588                 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
7589         } else {
7590                 /* Set NIC mode */
7591                 REG_WR(bp, PRS_REG_NIC_MODE, 1);
7592                 DP(NETIF_MSG_IFUP, "NIC MODE configured\n");
7593         }
7594
7595         if (!CHIP_IS_E1x(bp)) {
7596                 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
7597
7598                 /* Turn on a single ISR mode in IGU if driver is going to use
7599                  * INT#x or MSI
7600                  */
7601                 if (!(bp->flags & USING_MSIX_FLAG))
7602                         pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
7603                 /*
7604                  * Timers bug workaround: function init part.
7605                  * We need to wait 20msec after initializing the ILT to
7606                  * make sure there are no requests in one of the PXP
7607                  * internal queues with "old" ILT addresses
7608                  */
7609                 msleep(20);
7610                 /*
7611                  * Master enable - needed since WB DMAE writes are performed
7612                  * before this register is re-initialized as part of the
7613                  * regular function init
7614                  */
7615                 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
7616                 /* Enable the function in IGU */
7617                 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
7618         }
7619
7620         bp->dmae_ready = 1;
7621
7622         bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
7623
7624         if (!CHIP_IS_E1x(bp))
7625                 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
7626
7627         bnx2x_init_block(bp, BLOCK_ATC, init_phase);
7628         bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
7629         bnx2x_init_block(bp, BLOCK_NIG, init_phase);
7630         bnx2x_init_block(bp, BLOCK_SRC, init_phase);
7631         bnx2x_init_block(bp, BLOCK_MISC, init_phase);
7632         bnx2x_init_block(bp, BLOCK_TCM, init_phase);
7633         bnx2x_init_block(bp, BLOCK_UCM, init_phase);
7634         bnx2x_init_block(bp, BLOCK_CCM, init_phase);
7635         bnx2x_init_block(bp, BLOCK_XCM, init_phase);
7636         bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
7637         bnx2x_init_block(bp, BLOCK_USEM, init_phase);
7638         bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
7639         bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
7640
7641         if (!CHIP_IS_E1x(bp))
7642                 REG_WR(bp, QM_REG_PF_EN, 1);
7643
7644         if (!CHIP_IS_E1x(bp)) {
7645                 REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
7646                 REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
7647                 REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
7648                 REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
7649         }
7650         bnx2x_init_block(bp, BLOCK_QM, init_phase);
7651
7652         bnx2x_init_block(bp, BLOCK_TM, init_phase);
7653         bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
7654         REG_WR(bp, DORQ_REG_MODE_ACT, 1); /* no dpm */
7655
7656         bnx2x_iov_init_dq(bp);
7657
7658         bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
7659         bnx2x_init_block(bp, BLOCK_PRS, init_phase);
7660         bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
7661         bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
7662         bnx2x_init_block(bp, BLOCK_USDM, init_phase);
7663         bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
7664         bnx2x_init_block(bp, BLOCK_UPB, init_phase);
7665         bnx2x_init_block(bp, BLOCK_XPB, init_phase);
7666         bnx2x_init_block(bp, BLOCK_PBF, init_phase);
7667         if (!CHIP_IS_E1x(bp))
7668                 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
7669
7670         bnx2x_init_block(bp, BLOCK_CDU, init_phase);
7671
7672         bnx2x_init_block(bp, BLOCK_CFC, init_phase);
7673
7674         if (!CHIP_IS_E1x(bp))
7675                 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
7676
7677         if (IS_MF(bp)) {
7678                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
7679                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
7680         }
7681
7682         bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
7683
7684         /* HC init per function */
7685         if (bp->common.int_block == INT_BLOCK_HC) {
7686                 if (CHIP_IS_E1H(bp)) {
7687                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7688
7689                         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7690                         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7691                 }
7692                 bnx2x_init_block(bp, BLOCK_HC, init_phase);
7693
7694         } else {
7695                 int num_segs, sb_idx, prod_offset;
7696
7697                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7698
7699                 if (!CHIP_IS_E1x(bp)) {
7700                         REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
7701                         REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
7702                 }
7703
7704                 bnx2x_init_block(bp, BLOCK_IGU, init_phase);
7705
7706                 if (!CHIP_IS_E1x(bp)) {
7707                         int dsb_idx = 0;
7708                         /*
7709                          * Producer memory:
7710                          * E2 mode: addresses 0-135 map to the mapping memory;
7711                          * 136 - PF0 default prod; 137 - PF1 default prod;
7712                          * 138 - PF2 default prod; 139 - PF3 default prod;
7713                          * 140 - PF0 attn prod;    141 - PF1 attn prod;
7714                          * 142 - PF2 attn prod;    143 - PF3 attn prod;
7715                          * 144-147 reserved.
7716                          *
7717                          * E1.5 mode - in backward compatible mode,
7718                          * for a non-default SB, each even line in the memory
7719                          * holds the U producer and each odd line holds
7720                          * the C producer. The first 128 producers are for
7721                          * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
7722                          * producers are for the DSB for each PF.
7723                          * Each PF has five segments: (the order inside each
7724                          * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
7725                          * 132-135 C prods; 136-139 X prods; 140-143 T prods;
7726                          * 144-147 attn prods;
7727                          */
7728                         /* non-default-status-blocks */
7729                         num_segs = CHIP_INT_MODE_IS_BC(bp) ?
7730                                 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
7731                         for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
7732                                 prod_offset = (bp->igu_base_sb + sb_idx) *
7733                                         num_segs;
7734
7735                                 for (i = 0; i < num_segs; i++) {
7736                                         addr = IGU_REG_PROD_CONS_MEMORY +
7737                                                         (prod_offset + i) * 4;
7738                                         REG_WR(bp, addr, 0);
7739                                 }
7740                                 /* send consumer update with value 0 */
7741                                 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
7742                                              USTORM_ID, 0, IGU_INT_NOP, 1);
7743                                 bnx2x_igu_clear_sb(bp,
7744                                                    bp->igu_base_sb + sb_idx);
7745                         }
7746
7747                         /* default-status-blocks */
7748                         num_segs = CHIP_INT_MODE_IS_BC(bp) ?
7749                                 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
7750
7751                         if (CHIP_MODE_IS_4_PORT(bp))
7752                                 dsb_idx = BP_FUNC(bp);
7753                         else
7754                                 dsb_idx = BP_VN(bp);
7755
7756                         prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
7757                                        IGU_BC_BASE_DSB_PROD + dsb_idx :
7758                                        IGU_NORM_BASE_DSB_PROD + dsb_idx);
7759
7760                         /*
7761                          * igu prods come in chunks of E1HVN_MAX (4) -
7762                          * it does not matter what the current chip mode is
7763                          */
7764                         for (i = 0; i < (num_segs * E1HVN_MAX);
7765                              i += E1HVN_MAX) {
7766                                 addr = IGU_REG_PROD_CONS_MEMORY +
7767                                                         (prod_offset + i)*4;
7768                                 REG_WR(bp, addr, 0);
7769                         }
7770                         /* send consumer update with 0 */
7771                         if (CHIP_INT_MODE_IS_BC(bp)) {
7772                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7773                                              USTORM_ID, 0, IGU_INT_NOP, 1);
7774                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7775                                              CSTORM_ID, 0, IGU_INT_NOP, 1);
7776                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7777                                              XSTORM_ID, 0, IGU_INT_NOP, 1);
7778                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7779                                              TSTORM_ID, 0, IGU_INT_NOP, 1);
7780                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7781                                              ATTENTION_ID, 0, IGU_INT_NOP, 1);
7782                         } else {
7783                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7784                                              USTORM_ID, 0, IGU_INT_NOP, 1);
7785                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7786                                              ATTENTION_ID, 0, IGU_INT_NOP, 1);
7787                         }
7788                         bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
7789
7790                         /* !!! These should become driver const once
7791                            rf-tool supports split-68 const */
7792                         REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
7793                         REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
7794                         REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
7795                         REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
7796                         REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
7797                         REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
7798                 }
7799         }
7800
7801         /* Reset PCIE errors for debug */
7802         REG_WR(bp, 0x2114, 0xffffffff);
7803         REG_WR(bp, 0x2120, 0xffffffff);
7804
7805         if (CHIP_IS_E1x(bp)) {
7806                 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
7807                 main_mem_base = HC_REG_MAIN_MEMORY +
7808                                 BP_PORT(bp) * (main_mem_size * 4);
7809                 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
7810                 main_mem_width = 8;
7811
7812                 val = REG_RD(bp, main_mem_prty_clr);
7813                 if (val)
7814                         DP(NETIF_MSG_HW,
7815                            "Hmmm... Parity errors in HC block during function init (0x%x)!\n",
7816                            val);
7817
7818                 /* Clear "false" parity errors in MSI-X table */
7819                 for (i = main_mem_base;
7820                      i < main_mem_base + main_mem_size * 4;
7821                      i += main_mem_width) {
7822                         bnx2x_read_dmae(bp, i, main_mem_width / 4);
7823                         bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
7824                                          i, main_mem_width / 4);
7825                 }
7826                 /* Clear HC parity attention */
7827                 REG_RD(bp, main_mem_prty_clr);
7828         }
7829
7830 #ifdef BNX2X_STOP_ON_ERROR
7831         /* Enable STORMs SP logging */
7832         REG_WR8(bp, BAR_USTRORM_INTMEM +
7833                USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
7834         REG_WR8(bp, BAR_TSTRORM_INTMEM +
7835                TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
7836         REG_WR8(bp, BAR_CSTRORM_INTMEM +
7837                CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
7838         REG_WR8(bp, BAR_XSTRORM_INTMEM +
7839                XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
7840 #endif
7841
7842         bnx2x_phy_probe(&bp->link_params);
7843
7844         return 0;
7845 }
7846
7847 void bnx2x_free_mem_cnic(struct bnx2x *bp)
7848 {
7849         bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE);
7850
7851         if (!CHIP_IS_E1x(bp))
7852                 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
7853                                sizeof(struct host_hc_status_block_e2));
7854         else
7855                 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
7856                                sizeof(struct host_hc_status_block_e1x));
7857
7858         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
7859 }
7860
7861 void bnx2x_free_mem(struct bnx2x *bp)
7862 {
7863         int i;
7864
7865         BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
7866                        bp->fw_stats_data_sz + bp->fw_stats_req_sz);
7867
7868         if (IS_VF(bp))
7869                 return;
7870
7871         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
7872                        sizeof(struct host_sp_status_block));
7873
7874         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
7875                        sizeof(struct bnx2x_slowpath));
7876
7877         for (i = 0; i < L2_ILT_LINES(bp); i++)
7878                 BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping,
7879                                bp->context[i].size);
7880         bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
7881
7882         BNX2X_FREE(bp->ilt->lines);
7883
7884         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
7885
7886         BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
7887                        BCM_PAGE_SIZE * NUM_EQ_PAGES);
7888
7889         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
7890
7891         bnx2x_iov_free_mem(bp);
7892 }
7893
7894 int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
7895 {
7896         if (!CHIP_IS_E1x(bp))
7897                 /* size = the status block + ramrod buffers */
7898                 BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
7899                                 sizeof(struct host_hc_status_block_e2));
7900         else
7901                 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb,
7902                                 &bp->cnic_sb_mapping,
7903                                 sizeof(struct
7904                                        host_hc_status_block_e1x));
7905
7906         if (CONFIGURE_NIC_MODE(bp) && !bp->t2)
7907                 /* allocate searcher T2 table, as it wasn't allocated before */
7908                 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
7909
7910         /* write address to which L5 should insert its values */
7911         bp->cnic_eth_dev.addr_drv_info_to_mcp =
7912                 &bp->slowpath->drv_info_to_mcp;
7913
7914         if (bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_ALLOC))
7915                 goto alloc_mem_err;
7916
7917         return 0;
7918
7919 alloc_mem_err:
7920         bnx2x_free_mem_cnic(bp);
7921         BNX2X_ERR("Can't allocate memory\n");
7922         return -ENOMEM;
7923 }
7924
7925 int bnx2x_alloc_mem(struct bnx2x *bp)
7926 {
7927         int i, allocated, context_size;
7928
7929         if (!CONFIGURE_NIC_MODE(bp) && !bp->t2)
7930                 /* allocate searcher T2 table */
7931                 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
7932
7933         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
7934                         sizeof(struct host_sp_status_block));
7935
7936         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
7937                         sizeof(struct bnx2x_slowpath));
7938
7939         /* Allocate memory for CDU context:
7940          * This memory is allocated separately and not in the generic ILT
7941          * functions because CDU differs in a few aspects:
7942          * 1. There are multiple entities allocating memory for context -
7943          * 'regular' driver, CNIC and SRIOV driver. Each separately controls
7944          * its own ILT lines.
7945          * 2. Since CDU page-size is not a single 4KB page (which is the case
7946          * for the other ILT clients), to be efficient we want to support
7947          * allocation of sub-page-size in the last entry.
7948          * 3. Context pointers are used by the driver to pass to FW / update
7949          * the context (for the other ILT clients the pointers are used just to
7950          * free the memory during unload).
7951          */
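        /* Illustrative example (hypothetical sizes): with a CDU_ILT_PAGE_SZ of
         * 32 KiB and a context_size of 100 KiB, the loop below would allocate
         * three full 32 KiB chunks plus one final 4 KiB chunk, so the last
         * entry is backed by a sub-page-size allocation.
         */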
7952         context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
7953
7954         for (i = 0, allocated = 0; allocated < context_size; i++) {
7955                 bp->context[i].size = min(CDU_ILT_PAGE_SZ,
7956                                           (context_size - allocated));
7957                 BNX2X_PCI_ALLOC(bp->context[i].vcxt,
7958                                 &bp->context[i].cxt_mapping,
7959                                 bp->context[i].size);
7960                 allocated += bp->context[i].size;
7961         }
7962         BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
7963
7964         if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
7965                 goto alloc_mem_err;
7966
7967         if (bnx2x_iov_alloc_mem(bp))
7968                 goto alloc_mem_err;
7969
7970         /* Slow path ring */
7971         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
7972
7973         /* EQ */
7974         BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
7975                         BCM_PAGE_SIZE * NUM_EQ_PAGES);
7976
7977         return 0;
7978
7979 alloc_mem_err:
7980         bnx2x_free_mem(bp);
7981         BNX2X_ERR("Can't allocate memory\n");
7982         return -ENOMEM;
7983 }
7984
7985 /*
7986  * Init service functions
7987  */
7988
7989 int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
7990                       struct bnx2x_vlan_mac_obj *obj, bool set,
7991                       int mac_type, unsigned long *ramrod_flags)
7992 {
7993         int rc;
7994         struct bnx2x_vlan_mac_ramrod_params ramrod_param;
7995
7996         memset(&ramrod_param, 0, sizeof(ramrod_param));
7997
7998         /* Fill general parameters */
7999         ramrod_param.vlan_mac_obj = obj;
8000         ramrod_param.ramrod_flags = *ramrod_flags;
8001
8002         /* Fill a user request section if needed */
8003         if (!test_bit(RAMROD_CONT, ramrod_flags)) {
8004                 memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
8005
8006                 __set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
8007
8008                 /* Set the command: ADD or DEL */
8009                 if (set)
8010                         ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
8011                 else
8012                         ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
8013         }
8014
8015         rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
8016
8017         if (rc == -EEXIST) {
8018                 DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
8019                 /* do not treat adding same MAC as error */
8020                 rc = 0;
8021         } else if (rc < 0)
8022                 BNX2X_ERR("%s MAC failed\n", (set ? "Set" : "Del"));
8023
8024         return rc;
8025 }
8026
8027 int bnx2x_del_all_macs(struct bnx2x *bp,
8028                        struct bnx2x_vlan_mac_obj *mac_obj,
8029                        int mac_type, bool wait_for_comp)
8030 {
8031         int rc;
8032         unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
8033
8034         /* Wait for completion of the requested command */
8035         if (wait_for_comp)
8036                 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
8037
8038         /* Set the mac type of addresses we want to clear */
8039         __set_bit(mac_type, &vlan_mac_flags);
8040
8041         rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags);
8042         if (rc < 0)
8043                 BNX2X_ERR("Failed to delete MACs: %d\n", rc);
8044
8045         return rc;
8046 }
8047
8048 int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
8049 {
8050         if (is_zero_ether_addr(bp->dev->dev_addr) &&
8051             (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
8052                 DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN,
8053                    "Ignoring Zero MAC for STORAGE SD mode\n");
8054                 return 0;
8055         }
8056
8057         if (IS_PF(bp)) {
8058                 unsigned long ramrod_flags = 0;
8059
8060                 DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
8061                 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
8062                 return bnx2x_set_mac_one(bp, bp->dev->dev_addr,
8063                                          &bp->sp_objs->mac_obj, set,
8064                                          BNX2X_ETH_MAC, &ramrod_flags);
8065         } else { /* vf */
8066                 return bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr,
8067                                              bp->fp->index, true);
8068         }
8069 }
8070
8071 int bnx2x_setup_leading(struct bnx2x *bp)
8072 {
8073         if (IS_PF(bp))
8074                 return bnx2x_setup_queue(bp, &bp->fp[0], true);
8075         else /* VF */
8076                 return bnx2x_vfpf_setup_q(bp, &bp->fp[0], true);
8077 }
8078
8079 /**
8080  * bnx2x_set_int_mode - configure interrupt mode
8081  *
8082  * @bp:         driver handle
8083  *
8084  * In case of MSI-X, it tries to enable MSI-X and falls back to MSI or INTx.
8085  */
8086 int bnx2x_set_int_mode(struct bnx2x *bp)
8087 {
8088         int rc = 0;
8089
8090         if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX) {
8091                 BNX2X_ERR("VF not loaded since interrupt mode is not msix\n");
8092                 return -EINVAL;
8093         }
8094
8095         switch (int_mode) {
8096         case BNX2X_INT_MODE_MSIX:
8097                 /* attempt to enable msix */
8098                 rc = bnx2x_enable_msix(bp);
8099
8100                 /* msix attained */
8101                 if (!rc)
8102                         return 0;
8103
8104                 /* vfs use only msix */
8105                 if (rc && IS_VF(bp))
8106                         return rc;
8107
8108                 /* failed to enable multiple MSI-X */
8109                 BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
8110                                bp->num_queues,
8111                                1 + bp->num_cnic_queues);
8112
8113                 /* falling through... */
8114         case BNX2X_INT_MODE_MSI:
8115                 bnx2x_enable_msi(bp);
8116
8117                 /* falling through... */
8118         case BNX2X_INT_MODE_INTX:
8119                 bp->num_ethernet_queues = 1;
8120                 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
8121                 BNX2X_DEV_INFO("set number of queues to 1\n");
8122                 break;
8123         default:
8124                 BNX2X_DEV_INFO("unknown value in int_mode module parameter\n");
8125                 return -EINVAL;
8126         }
8127         return 0;
8128 }
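
/* Note on the fall-throughs above: a failed MSI-X request degrades to MSI,
 * and the MSI and INTx cases both end up with a single ethernet queue.
 * int_mode is the module parameter named in the default case; e.g.
 * (illustrative) loading with "modprobe bnx2x int_mode=2" would request MSI.
 */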
8129
8130 /* must be called prior to any HW initializations */
8131 static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
8132 {
8133         if (IS_SRIOV(bp))
8134                 return (BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)/ILT_PAGE_CIDS;
8135         return L2_ILT_LINES(bp);
8136 }
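
/* Illustrative arithmetic (hypothetical values): if ILT_PAGE_CIDS were 256
 * and BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS were 1024, the SR-IOV case above
 * would reserve 1024 / 256 = 4 ILT lines for the PF + VF CID range.
 */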
8137
8138 void bnx2x_ilt_set_info(struct bnx2x *bp)
8139 {
8140         struct ilt_client_info *ilt_client;
8141         struct bnx2x_ilt *ilt = BP_ILT(bp);
8142         u16 line = 0;
8143
8144         ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
8145         DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
8146
8147         /* CDU */
8148         ilt_client = &ilt->clients[ILT_CLIENT_CDU];
8149         ilt_client->client_num = ILT_CLIENT_CDU;
8150         ilt_client->page_size = CDU_ILT_PAGE_SZ;
8151         ilt_client->flags = ILT_CLIENT_SKIP_MEM;
8152         ilt_client->start = line;
8153         line += bnx2x_cid_ilt_lines(bp);
8154
8155         if (CNIC_SUPPORT(bp))
8156                 line += CNIC_ILT_LINES;
8157         ilt_client->end = line - 1;
8158
8159         DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8160            ilt_client->start,
8161            ilt_client->end,
8162            ilt_client->page_size,
8163            ilt_client->flags,
8164            ilog2(ilt_client->page_size >> 12));
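        /* e.g. (illustrative): a 32 KiB ILT page (0x8000) is reported as
         * hw psz ilog2(0x8000 >> 12) = 3, i.e. the code for 8 x 4 KiB.
         */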
8165
8166         /* QM */
8167         if (QM_INIT(bp->qm_cid_count)) {
8168                 ilt_client = &ilt->clients[ILT_CLIENT_QM];
8169                 ilt_client->client_num = ILT_CLIENT_QM;
8170                 ilt_client->page_size = QM_ILT_PAGE_SZ;
8171                 ilt_client->flags = 0;
8172                 ilt_client->start = line;
8173
8174                 /* 4 bytes for each cid */
8175                 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
8176                                                          QM_ILT_PAGE_SZ);
8177
8178                 ilt_client->end = line - 1;
8179
8180                 DP(NETIF_MSG_IFUP,
8181                    "ilt client[QM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8182                    ilt_client->start,
8183                    ilt_client->end,
8184                    ilt_client->page_size,
8185                    ilt_client->flags,
8186                    ilog2(ilt_client->page_size >> 12));
8187         }
8188
8189         if (CNIC_SUPPORT(bp)) {
8190                 /* SRC */
8191                 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
8192                 ilt_client->client_num = ILT_CLIENT_SRC;
8193                 ilt_client->page_size = SRC_ILT_PAGE_SZ;
8194                 ilt_client->flags = 0;
8195                 ilt_client->start = line;
8196                 line += SRC_ILT_LINES;
8197                 ilt_client->end = line - 1;
8198
8199                 DP(NETIF_MSG_IFUP,
8200                    "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8201                    ilt_client->start,
8202                    ilt_client->end,
8203                    ilt_client->page_size,
8204                    ilt_client->flags,
8205                    ilog2(ilt_client->page_size >> 12));
8206
8207                 /* TM */
8208                 ilt_client = &ilt->clients[ILT_CLIENT_TM];
8209                 ilt_client->client_num = ILT_CLIENT_TM;
8210                 ilt_client->page_size = TM_ILT_PAGE_SZ;
8211                 ilt_client->flags = 0;
8212                 ilt_client->start = line;
8213                 line += TM_ILT_LINES;
8214                 ilt_client->end = line - 1;
8215
8216                 DP(NETIF_MSG_IFUP,
8217                    "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8218                    ilt_client->start,
8219                    ilt_client->end,
8220                    ilt_client->page_size,
8221                    ilt_client->flags,
8222                    ilog2(ilt_client->page_size >> 12));
8223         }
8224
8225         BUG_ON(line > ILT_MAX_LINES);
8226 }
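
/* Schematically, the per-function ILT window assembled above is:
 *
 *   start_line -> [ CDU (+ CNIC lines) ][ QM ][ SRC ][ TM ]
 *
 * where QM is present only when qm_cid_count is initialized, and SRC/TM
 * only when CNIC is supported.
 */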
8227
8228 /**
8229  * bnx2x_pf_q_prep_init - prepare INIT transition parameters
8230  *
8231  * @bp:                 driver handle
8232  * @fp:                 pointer to fastpath
8233  * @init_params:        pointer to parameters structure
8234  *
8235  * parameters configured:
8236  *      - HC configuration
8237  *      - Queue's CDU context
8238  */
8239 static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
8240         struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params)
8241 {
8242         u8 cos;
8243         int cxt_index, cxt_offset;
8244
8245         /* FCoE Queue uses Default SB, thus has no HC capabilities */
8246         if (!IS_FCOE_FP(fp)) {
8247                 __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
8248                 __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags);
8249
8250                 /* If HC is supported, enable host coalescing in the transition
8251                  * to INIT state.
8252                  */
8253                 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags);
8254                 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags);
8255
8256                 /* HC rate */
8257                 init_params->rx.hc_rate = bp->rx_ticks ?
8258                         (1000000 / bp->rx_ticks) : 0;
8259                 init_params->tx.hc_rate = bp->tx_ticks ?
8260                         (1000000 / bp->tx_ticks) : 0;
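
                /* e.g. (illustrative): rx_ticks = 50 (usec) yields an Rx
                 * hc_rate of 1000000 / 50 = 20000 coalescing events/sec.
                 */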
8261
8262                 /* FW SB ID */
8263                 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id =
8264                         fp->fw_sb_id;
8265
8266                 /*
8267                  * CQ index among the SB indices: FCoE clients use the default
8268                  * SB, therefore theirs is different.
8269                  */
8270                 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
8271                 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
8272         }
8273
8274         /* set maximum number of COSs supported by this queue */
8275         init_params->max_cos = fp->max_cos;
8276
8277         DP(NETIF_MSG_IFUP, "fp: %d setting queue params max cos to: %d\n",
8278             fp->index, init_params->max_cos);
8279
8280         /* set the context pointers queue object */
8281         for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
8282                 cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS;
8283                 cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index *
8284                                 ILT_PAGE_CIDS);
8285                 init_params->cxts[cos] =
8286                         &bp->context[cxt_index].vcxt[cxt_offset].eth;
8287         }
8288 }
8289
8290 static int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
8291                         struct bnx2x_queue_state_params *q_params,
8292                         struct bnx2x_queue_setup_tx_only_params *tx_only_params,
8293                         int tx_index, bool leading)
8294 {
8295         memset(tx_only_params, 0, sizeof(*tx_only_params));
8296
8297         /* Set the command */
8298         q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
8299
8300         /* Set tx-only QUEUE flags: don't zero statistics */
8301         tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false);
8302
8303         /* choose the index of the cid to send the slow path on */
8304         tx_only_params->cid_index = tx_index;
8305
8306         /* Set general TX_ONLY_SETUP parameters */
8307         bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index);
8308
8309         /* Set Tx TX_ONLY_SETUP parameters */
8310         bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index);
8311
8312         DP(NETIF_MSG_IFUP,
8313            "preparing to send tx-only ramrod for connection: cos %d, primary cid %d, cid %d, client id %d, sp-client id %d, flags %lx\n",
8314            tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX],
8315            q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id,
8316            tx_only_params->gen_params.spcl_id, tx_only_params->flags);
8317
8318         /* send the ramrod */
8319         return bnx2x_queue_state_change(bp, q_params);
8320 }
8321
8322 /**
8323  * bnx2x_setup_queue - setup queue
8324  *
8325  * @bp:         driver handle
8326  * @fp:         pointer to fastpath
8327  * @leading:    is leading
8328  *
8329  * This function performs 2 steps in a Queue state machine
8330  *      actually: 1) RESET->INIT 2) INIT->SETUP
8331  */
8332
8333 int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
8334                        bool leading)
8335 {
8336         struct bnx2x_queue_state_params q_params = {NULL};
8337         struct bnx2x_queue_setup_params *setup_params =
8338                                                 &q_params.params.setup;
8339         struct bnx2x_queue_setup_tx_only_params *tx_only_params =
8340                                                 &q_params.params.tx_only;
8341         int rc;
8342         u8 tx_index;
8343
8344         DP(NETIF_MSG_IFUP, "setting up queue %d\n", fp->index);
8345
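        /* Overall command sequence issued by this function:
         *   Q_CMD_INIT -> Q_CMD_SETUP -> Q_CMD_SETUP_TX_ONLY (per extra CoS)
         */
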
8346         /* reset IGU state; skip the FCoE L2 queue */
8347         if (!IS_FCOE_FP(fp))
8348                 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
8349                              IGU_INT_ENABLE, 0);
8350
8351         q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
8352         /* We want to wait for completion in this context */
8353         __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
8354
8355         /* Prepare the INIT parameters */
8356         bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init);
8357
8358         /* Set the command */
8359         q_params.cmd = BNX2X_Q_CMD_INIT;
8360
8361         /* Change the state to INIT */
8362         rc = bnx2x_queue_state_change(bp, &q_params);
8363         if (rc) {
8364                 BNX2X_ERR("Queue(%d) INIT failed\n", fp->index);
8365                 return rc;
8366         }
8367
8368         DP(NETIF_MSG_IFUP, "init complete\n");
8369
8370         /* Now move the Queue to the SETUP state... */
8371         memset(setup_params, 0, sizeof(*setup_params));
8372
8373         /* Set QUEUE flags */
8374         setup_params->flags = bnx2x_get_q_flags(bp, fp, leading);
8375
8376         /* Set general SETUP parameters */
8377         bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params,
8378                                 FIRST_TX_COS_INDEX);
8379
8380         bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params,
8381                             &setup_params->rxq_params);
8382
8383         bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params,
8384                            FIRST_TX_COS_INDEX);
8385
8386         /* Set the command */
8387         q_params.cmd = BNX2X_Q_CMD_SETUP;
8388
8389         if (IS_FCOE_FP(fp))
8390                 bp->fcoe_init = true;
8391
8392         /* Change the state to SETUP */
8393         rc = bnx2x_queue_state_change(bp, &q_params);
8394         if (rc) {
8395                 BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index);
8396                 return rc;
8397         }
8398
8399         /* loop through the relevant tx-only indices */
8400         for (tx_index = FIRST_TX_ONLY_COS_INDEX;
8401               tx_index < fp->max_cos;
8402               tx_index++) {
8403
8404                 /* prepare and send tx-only ramrod */
8405                 rc = bnx2x_setup_tx_only(bp, fp, &q_params,
8406                                           tx_only_params, tx_index, leading);
8407                 if (rc) {
8408                         BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n",
8409                                   fp->index, tx_index);
8410                         return rc;
8411                 }
8412         }
8413
8414         return rc;
8415 }
8416
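/* Tear the queue down in roughly the reverse order of bnx2x_setup_queue():
 * first TERMINATE + CFC_DEL each tx-only connection, then HALT, TERMINATE
 * and CFC_DEL the primary connection.
 */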
8417 static int bnx2x_stop_queue(struct bnx2x *bp, int index)
8418 {
8419         struct bnx2x_fastpath *fp = &bp->fp[index];
8420         struct bnx2x_fp_txdata *txdata;
8421         struct bnx2x_queue_state_params q_params = {NULL};
8422         int rc, tx_index;
8423
8424         DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid);
8425
8426         q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
8427         /* We want to wait for completion in this context */
8428         __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
8429
8430         /* close tx-only connections */
8431         for (tx_index = FIRST_TX_ONLY_COS_INDEX;
8432              tx_index < fp->max_cos;
8433              tx_index++) {
8434
8435                 /* ascertain this is a normal queue */
8436                 txdata = fp->txdata_ptr[tx_index];
8437
8438                 DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n",
8439                                                         txdata->txq_index);
8440
8441                 /* send terminate ramrod on the tx-only connection */
8442                 q_params.cmd = BNX2X_Q_CMD_TERMINATE;
8443                 memset(&q_params.params.terminate, 0,
8444                        sizeof(q_params.params.terminate));
8445                 q_params.params.terminate.cid_index = tx_index;
8446
8447                 rc = bnx2x_queue_state_change(bp, &q_params);
8448                 if (rc)
8449                         return rc;
8450
8451                 /* send cfc del ramrod on the tx-only connection */
8452                 q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
8453                 memset(&q_params.params.cfc_del, 0,
8454                        sizeof(q_params.params.cfc_del));
8455                 q_params.params.cfc_del.cid_index = tx_index;
8456                 rc = bnx2x_queue_state_change(bp, &q_params);
8457                 if (rc)
8458                         return rc;
8459         }
8460         /* Stop the primary connection: */
8461         /* ...halt the connection */
8462         q_params.cmd = BNX2X_Q_CMD_HALT;
8463         rc = bnx2x_queue_state_change(bp, &q_params);
8464         if (rc)
8465                 return rc;
8466
8467         /* ...terminate the connection */
8468         q_params.cmd = BNX2X_Q_CMD_TERMINATE;
8469         memset(&q_params.params.terminate, 0,
8470                sizeof(q_params.params.terminate));
8471         q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
8472         rc = bnx2x_queue_state_change(bp, &q_params);
8473         if (rc)
8474                 return rc;
8475         /* ...delete cfc entry */
8476         q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
8477         memset(&q_params.params.cfc_del, 0,
8478                sizeof(q_params.params.cfc_del));
8479         q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
8480         return bnx2x_queue_state_change(bp, &q_params);
8481 }
8482
8483 static void bnx2x_reset_func(struct bnx2x *bp)
8484 {
8485         int port = BP_PORT(bp);
8486         int func = BP_FUNC(bp);
8487         int i;
8488
8489         /* Disable the function in the FW */
8490         REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
8491         REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
8492         REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
8493         REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
8494
8495         /* FP SBs */
8496         for_each_eth_queue(bp, i) {
8497                 struct bnx2x_fastpath *fp = &bp->fp[i];
8498                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8499                            CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
8500                            SB_DISABLED);
8501         }
8502
8503         if (CNIC_LOADED(bp))
8504                 /* CNIC SB */
8505                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8506                         CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET
8507                         (bnx2x_cnic_fw_sb_id(bp)), SB_DISABLED);
8508
8509         /* SP SB */
8510         REG_WR8(bp, BAR_CSTRORM_INTMEM +
8511                 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
8512                 SB_DISABLED);
8513
8514         for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
8515                 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
8516                        0);
8517
8518         /* Configure IGU */
8519         if (bp->common.int_block == INT_BLOCK_HC) {
8520                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
8521                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
8522         } else {
8523                 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
8524                 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
8525         }
8526
8527         if (CNIC_LOADED(bp)) {
8528                 /* Disable Timer scan */
8529                 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
8530                 /*
8531                  * Wait for at least 10ms and up to 2 seconds for the timers
8532                  * scan to complete
8533                  */
8534                 for (i = 0; i < 200; i++) {
8535                         usleep_range(10000, 20000);
8536                         if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
8537                                 break;
8538                 }
8539         }
8540         /* Clear ILT */
8541         bnx2x_clear_func_ilt(bp, func);
8542
8543         /* Timers workaround for an E2 bug: if this is vnic-3,
8544          * we need to set the entire ILT range for its timers.
8545          */
8546         if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) {
8547                 struct ilt_client_info ilt_cli;
8548                 /* use dummy TM client */
8549                 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
8550                 ilt_cli.start = 0;
8551                 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
8552                 ilt_cli.client_num = ILT_CLIENT_TM;
8553
8554                 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
8555         }
8556
8557         /* this assumes that reset_port() is called before reset_func() */
8558         if (!CHIP_IS_E1x(bp))
8559                 bnx2x_pf_disable(bp);
8560
8561         bp->dmae_ready = 0;
8562 }
8563
8564 static void bnx2x_reset_port(struct bnx2x *bp)
8565 {
8566         int port = BP_PORT(bp);
8567         u32 val;
8568
8569         /* Reset physical Link */
8570         bnx2x__link_reset(bp);
8571
8572         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
8573
8574         /* Do not rcv packets to BRB */
8575         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
8576         /* Do not direct rcv packets that are not for MCP to the BRB */
8577         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
8578                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8579
8580         /* Configure AEU */
8581         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
8582
8583         msleep(100);
8584         /* Check for BRB port occupancy */
8585         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
8586         if (val)
8587                 DP(NETIF_MSG_IFDOWN,
8588                    "BRB1 is not empty, %d blocks are occupied\n", val);
8589
8590         /* TODO: Close Doorbell port? */
8591 }
8592
8593 static int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
8594 {
8595         struct bnx2x_func_state_params func_params = {NULL};
8596
8597         /* Prepare parameters for function state transitions */
8598         __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
8599
8600         func_params.f_obj = &bp->func_obj;
8601         func_params.cmd = BNX2X_F_CMD_HW_RESET;
8602
8603         func_params.params.hw_init.load_phase = load_code;
8604
8605         return bnx2x_func_state_change(bp, &func_params);
8606 }
8607
8608 static int bnx2x_func_stop(struct bnx2x *bp)
8609 {
8610         struct bnx2x_func_state_params func_params = {NULL};
8611         int rc;
8612
8613         /* Prepare parameters for function state transitions */
8614         __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
8615         func_params.f_obj = &bp->func_obj;
8616         func_params.cmd = BNX2X_F_CMD_STOP;
8617
8618         /*
8619          * Try to stop the function the 'good way'. If that fails (in case
8620          * of a parity error during bnx2x_chip_cleanup()) and we are not
8621          * in debug mode, perform a dry transaction in order to enable a
8622          * subsequent HW_RESET transaction.
8623          */
8624         rc = bnx2x_func_state_change(bp, &func_params);
8625         if (rc) {
8626 #ifdef BNX2X_STOP_ON_ERROR
8627                 return rc;
8628 #else
8629                 BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry transaction\n");
8630                 __set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
8631                 return bnx2x_func_state_change(bp, &func_params);
8632 #endif
8633         }
8634
8635         return 0;
8636 }
8637
8638 /**
8639  * bnx2x_send_unload_req - request unload mode from the MCP.
8640  *
8641  * @bp:                 driver handle
8642  * @unload_mode:        requested function's unload mode
8643  *
8644  * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
8645  */
8646 u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
8647 {
8648         u32 reset_code = 0;
8649         int port = BP_PORT(bp);
8650
8651         /* Select the UNLOAD request mode */
8652         if (unload_mode == UNLOAD_NORMAL)
8653                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8654
8655         else if (bp->flags & NO_WOL_FLAG)
8656                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
8657
8658         else if (bp->wol) {
8659                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
8660                 u8 *mac_addr = bp->dev->dev_addr;
8661                 struct pci_dev *pdev = bp->pdev;
8662                 u32 val;
8663                 u16 pmc;
8664
8665                 /* The mac address is written to entries 1-4 to
8666                  * preserve entry 0, which is used by the PMF
8667                  */
8668                 u8 entry = (BP_VN(bp) + 1)*8;
8669
8670                 val = (mac_addr[0] << 8) | mac_addr[1];
8671                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
8672
8673                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
8674                       (mac_addr[4] << 8) | mac_addr[5];
8675                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
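
                /* e.g. (illustrative): VN 0 uses entry 8, i.e. the second
                 * 8-byte match entry; for MAC 00:11:22:33:44:55 the two
                 * words written above are 0x0011 and 0x22334455.
                 */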
8676
8677                 /* Enable the PME and clear the status */
8678                 pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmc);
8679                 pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
8680                 pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmc);
8681
8682                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
8683
8684         } else
8685                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8686
8687         /* Send the request to the MCP */
8688         if (!BP_NOMCP(bp))
8689                 reset_code = bnx2x_fw_command(bp, reset_code, 0);
8690         else {
8691                 int path = BP_PATH(bp);
8692
8693                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d]      %d, %d, %d\n",
8694                    path, load_count[path][0], load_count[path][1],
8695                    load_count[path][2]);
8696                 load_count[path][0]--;
8697                 load_count[path][1 + port]--;
8698                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d]  %d, %d, %d\n",
8699                    path, load_count[path][0], load_count[path][1],
8700                    load_count[path][2]);
8701                 if (load_count[path][0] == 0)
8702                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
8703                 else if (load_count[path][1 + port] == 0)
8704                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
8705                 else
8706                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
8707         }
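
        /* Illustrative NO-MCP bookkeeping (hypothetical counts): with
         * load_count[path] == {2, 1, 1} (two functions on the path, one per
         * port), unloading the port-0 function leaves {1, 0, 1}: the per-port
         * count hit zero, so PORT is returned; when the last function on the
         * path unloads, the global count reaches zero and COMMON is returned.
         */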
8708
8709         return reset_code;
8710 }
8711
8712 /**
8713  * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
8714  *
8715  * @bp:         driver handle
8716  * @keep_link:          true iff link should be kept up
8717  */
8718 void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link)
8719 {
8720         u32 reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
8721
8722         /* Report UNLOAD_DONE to MCP */
8723         if (!BP_NOMCP(bp))
8724                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
8725 }
8726
8727 static int bnx2x_func_wait_started(struct bnx2x *bp)
8728 {
8729         int tout = 50;
8730         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
8731
8732         if (!bp->port.pmf)
8733                 return 0;
8734
8735         /*
8736          * (assumption: no attention from the MCP at this stage)
8737          * The PMF is probably in the middle of a TX disable/enable transaction.
8738          * 1. Sync the ISR for the default SB
8739          * 2. Sync the SP queue - this guarantees that attention handling has started
8740          * 3. Wait until the TX disable/enable transaction completes
8741          *
8742          * 1+2 guarantee that if a DCBx attention was scheduled, it has already
8743          * moved the transaction's pending bit from STARTED to TX_STOPPED; if we
8744          * have already received the completion for the transaction, the state
8745          * is TX_STOPPED. The state returns to STARTED once the
8746          * TX_STOPPED-->STARTED transaction completes.
8747          */
8748
8749         /* make sure default SB ISR is done */
8750         if (msix)
8751                 synchronize_irq(bp->msix_table[0].vector);
8752         else
8753                 synchronize_irq(bp->pdev->irq);
8754
8755         flush_workqueue(bnx2x_wq);
8756
8757         while (bnx2x_func_get_state(bp, &bp->func_obj) !=
8758                                 BNX2X_F_STATE_STARTED && tout--)
8759                 msleep(20);
8760
8761         if (bnx2x_func_get_state(bp, &bp->func_obj) !=
8762                                                 BNX2X_F_STATE_STARTED) {
8763 #ifdef BNX2X_STOP_ON_ERROR
8764                 BNX2X_ERR("Wrong function state\n");
8765                 return -EBUSY;
8766 #else
8767                 /*
8768                  * Failed to complete the transaction in a "good way".
8769                  * Force both transactions with the CLR bit.
8770                  */
8771                 struct bnx2x_func_state_params func_params = {NULL};
8772
8773                 DP(NETIF_MSG_IFDOWN,
8774                    "Hmmm... Unexpected function state! Forcing STARTED-->TX_STOPPED-->STARTED\n");
8775
8776                 func_params.f_obj = &bp->func_obj;
8777                 __set_bit(RAMROD_DRV_CLR_ONLY,
8778                                         &func_params.ramrod_flags);
8779
8780                 /* STARTED-->TX_STOPPED */
8781                 func_params.cmd = BNX2X_F_CMD_TX_STOP;
8782                 bnx2x_func_state_change(bp, &func_params);
8783
8784                 /* TX_STOPPED-->STARTED */
8785                 func_params.cmd = BNX2X_F_CMD_TX_START;
8786                 return bnx2x_func_state_change(bp, &func_params);
8787 #endif
8788         }
8789
8790         return 0;
8791 }
8792
8793 void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
8794 {
8795         int port = BP_PORT(bp);
8796         int i, rc = 0;
8797         u8 cos;
8798         struct bnx2x_mcast_ramrod_params rparam = {NULL};
8799         u32 reset_code;
8800
8801         /* Wait until tx fastpath tasks complete */
8802         for_each_tx_queue(bp, i) {
8803                 struct bnx2x_fastpath *fp = &bp->fp[i];
8804
8805                 for_each_cos_in_tx_queue(fp, cos)
8806                         rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
8807 #ifdef BNX2X_STOP_ON_ERROR
8808                 if (rc)
8809                         return;
8810 #endif
8811         }
8812
8813         /* Give HW time to discard old tx messages */
8814         usleep_range(1000, 2000);
8815
8816         /* Clean all ETH MACs */
8817         rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC,
8818                                 false);
8819         if (rc < 0)
8820                 BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);
8821
8822         /* Clean up UC list  */
8823         rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC,
8824                                 true);
8825         if (rc < 0)
8826                 BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
8827                           rc);
8828
8829         /* Disable LLH */
8830         if (!CHIP_IS_E1(bp))
8831                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
8832
8833         /* Set "drop all" (stop Rx).
8834          * We need to take a netif_addr_lock() here in order to prevent
8835          * a race between the completion code and this code.
8836          */
8837         netif_addr_lock_bh(bp->dev);
8838         /* Schedule the rx_mode command */
8839         if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
8840                 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
8841         else
8842                 bnx2x_set_storm_rx_mode(bp);
8843
8844         /* Cleanup multicast configuration */
8845         rparam.mcast_obj = &bp->mcast_obj;
8846         rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
8847         if (rc < 0)
8848                 BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc);
8849
8850         netif_addr_unlock_bh(bp->dev);
8851
8852         bnx2x_iov_chip_cleanup(bp);
8853
8854         /*
8855          * Send the UNLOAD_REQUEST to the MCP. This will return whether
8856          * this function should perform FUNC, PORT or COMMON HW
8857          * reset.
8858          */
8859         reset_code = bnx2x_send_unload_req(bp, unload_mode);
8860
8861         /*
8862          * (assumption: no attention from the MCP at this stage)
8863          * The PMF is probably in the middle of a TX disable/enable transaction.
8864          */
8865         rc = bnx2x_func_wait_started(bp);
8866         if (rc) {
8867                 BNX2X_ERR("bnx2x_func_wait_started failed\n");
8868 #ifdef BNX2X_STOP_ON_ERROR
8869                 return;
8870 #endif
8871         }
8872
8873         /* Close multi and leading connections
8874          * Completions for ramrods are collected in a synchronous way
8875          */
8876         for_each_eth_queue(bp, i)
8877                 if (bnx2x_stop_queue(bp, i))
8878 #ifdef BNX2X_STOP_ON_ERROR
8879                         return;
8880 #else
8881                         goto unload_error;
8882 #endif
8883
8884         if (CNIC_LOADED(bp)) {
8885                 for_each_cnic_queue(bp, i)
8886                         if (bnx2x_stop_queue(bp, i))
8887 #ifdef BNX2X_STOP_ON_ERROR
8888                                 return;
8889 #else
8890                                 goto unload_error;
8891 #endif
8892         }
8893
8894         /* If the SP settings have not completed by now, something has
8895          * gone very wrong.
8896          */
8897         if (!bnx2x_wait_sp_comp(bp, ~0x0UL))
8898                 BNX2X_ERR("Hmmm... Common slow path ramrods got stuck!\n");
8899
8900 #ifndef BNX2X_STOP_ON_ERROR
8901 unload_error:
8902 #endif
8903         rc = bnx2x_func_stop(bp);
8904         if (rc) {
8905                 BNX2X_ERR("Function stop failed!\n");
8906 #ifdef BNX2X_STOP_ON_ERROR
8907                 return;
8908 #endif
8909         }
8910
8911         /* Disable HW interrupts, NAPI */
8912         bnx2x_netif_stop(bp, 1);
8913         /* Delete all NAPI objects */
8914         bnx2x_del_all_napi(bp);
8915         if (CNIC_LOADED(bp))
8916                 bnx2x_del_all_napi_cnic(bp);
8917
8918         /* Release IRQs */
8919         bnx2x_free_irq(bp);
8920
8921         /* Reset the chip */
8922         rc = bnx2x_reset_hw(bp, reset_code);
8923         if (rc)
8924                 BNX2X_ERR("HW_RESET failed\n");
8925
8926         /* Report UNLOAD_DONE to MCP */
8927         bnx2x_send_unload_done(bp, keep_link);
8928 }
8929
8930 void bnx2x_disable_close_the_gate(struct bnx2x *bp)
8931 {
8932         u32 val;
8933
8934         DP(NETIF_MSG_IFDOWN, "Disabling \"close the gates\"\n");
8935
8936         if (CHIP_IS_E1(bp)) {
8937                 int port = BP_PORT(bp);
8938                 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8939                         MISC_REG_AEU_MASK_ATTN_FUNC_0;
8940
8941                 val = REG_RD(bp, addr);
8942                 val &= ~(0x300);
8943                 REG_WR(bp, addr, val);
8944         } else {
8945                 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
8946                 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
8947                          MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
8948                 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
8949         }
8950 }
8951
8952 /* Close gates #2, #3 and #4: */
8953 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
8954 {
8955         u32 val;
8956
8957         /* Gates #2 and #4a are closed/opened for "not E1" only */
8958         if (!CHIP_IS_E1(bp)) {
8959                 /* #4 */
8960                 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
8961                 /* #2 */
8962                 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
8963         }
8964
8965         /* #3 */
8966         if (CHIP_IS_E1x(bp)) {
8967                 /* Prevent interrupts from HC on both ports */
8968                 val = REG_RD(bp, HC_REG_CONFIG_1);
8969                 REG_WR(bp, HC_REG_CONFIG_1,
8970                        (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
8971                        (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
8972
8973                 val = REG_RD(bp, HC_REG_CONFIG_0);
8974                 REG_WR(bp, HC_REG_CONFIG_0,
8975                        (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
8976                        (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
8977         } else {
8978                 /* Prevent incoming interrupts in IGU */
8979                 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
8980
8981                 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION,
8982                        (!close) ?
8983                        (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
8984                        (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
8985         }
8986
8987         DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n",
8988                 close ? "closing" : "opening");
8989         mmiowb();
8990 }
8991
8992 #define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */
8993
8994 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
8995 {
8996         /* Do some magic... */
8997         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8998         *magic_val = val & SHARED_MF_CLP_MAGIC;
8999         MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
9000 }
9001
9002 /**
9003  * bnx2x_clp_reset_done - restore the value of the `magic' bit.
9004  *
9005  * @bp:         driver handle
9006  * @magic_val:  old value of the `magic' bit.
9007  */
9008 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
9009 {
9010         /* Restore the `magic' bit value... */
9011         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
9012         MF_CFG_WR(bp, shared_mf_config.clp_mb,
9013                 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
9014 }
9015
9016 /**
9017  * bnx2x_reset_mcp_prep - prepare for MCP reset.
9018  *
9019  * @bp:         driver handle
9020  * @magic_val:  old value of 'magic' bit.
9021  *
9022  * Takes care of CLP configurations.
9023  */
9024 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
9025 {
9026         u32 shmem;
9027         u32 validity_offset;
9028
9029         DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "Starting\n");
9030
9031         /* Set `magic' bit in order to save MF config */
9032         if (!CHIP_IS_E1(bp))
9033                 bnx2x_clp_reset_prep(bp, magic_val);
9034
9035         /* Get shmem offset */
9036         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9037         validity_offset =
9038                 offsetof(struct shmem_region, validity_map[BP_PORT(bp)]);
9039
9040         /* Clear validity map flags */
9041         if (shmem > 0)
9042                 REG_WR(bp, shmem + validity_offset, 0);
9043 }
9044
9045 #define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
9046 #define MCP_ONE_TIMEOUT  100    /* 100 ms */
9047
9048 /**
9049  * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT
9050  *
9051  * @bp: driver handle
9052  */
9053 static void bnx2x_mcp_wait_one(struct bnx2x *bp)
9054 {
9055         /* special handling for emulation and FPGA,
9056            wait 10 times longer */
9057         if (CHIP_REV_IS_SLOW(bp))
9058                 msleep(MCP_ONE_TIMEOUT*10);
9059         else
9060                 msleep(MCP_ONE_TIMEOUT);
9061 }
9062
9063 /*
9064  * initializes bp->common.shmem_base and waits for validity signature to appear
9065  */
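/* Polls every MCP_ONE_TIMEOUT ms (ten times longer on emulation/FPGA), for
 * up to MCP_TIMEOUT / MCP_ONE_TIMEOUT == 50 attempts, i.e. roughly 5 seconds
 * on real silicon.
 */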
9066 static int bnx2x_init_shmem(struct bnx2x *bp)
9067 {
9068         int cnt = 0;
9069         u32 val = 0;
9070
9071         do {
9072                 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9073                 if (bp->common.shmem_base) {
9074                         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9075                         if (val & SHR_MEM_VALIDITY_MB)
9076                                 return 0;
9077                 }
9078
9079                 bnx2x_mcp_wait_one(bp);
9080
9081         } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
9082
9083         BNX2X_ERR("BAD MCP validity signature\n");
9084
9085         return -ENODEV;
9086 }
9087
9088 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
9089 {
9090         int rc = bnx2x_init_shmem(bp);
9091
9092         /* Restore the `magic' bit value */
9093         if (!CHIP_IS_E1(bp))
9094                 bnx2x_clp_reset_done(bp, magic_val);
9095
9096         return rc;
9097 }
9098
9099 static void bnx2x_pxp_prep(struct bnx2x *bp)
9100 {
9101         if (!CHIP_IS_E1(bp)) {
9102                 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
9103                 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
9104                 mmiowb();
9105         }
9106 }
9107
9108 /*
9109  * Reset the whole chip except for:
9110  *      - PCIE core
9111  *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
9112  *              one reset bit)
9113  *      - IGU
9114  *      - MISC (including AEU)
9115  *      - GRC
9116  *      - RBCN, RBCP
9117  */
9118 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global)
9119 {
9120         u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
9121         u32 global_bits2, stay_reset2;
9122
9123         /*
9124          * Bits that have to be set in reset_mask2 if we want to reset 'global'
9125          * (per chip) blocks.
9126          */
9127         global_bits2 =
9128                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
9129                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
9130
9131         /* Don't reset the following blocks.
9132          * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
9133          *            reset, as in a 4-port device they might still be owned
9134          *            by the MCP (there is only one leader per path).
9135          */
9136         not_reset_mask1 =
9137                 MISC_REGISTERS_RESET_REG_1_RST_HC |
9138                 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
9139                 MISC_REGISTERS_RESET_REG_1_RST_PXP;
9140
9141         not_reset_mask2 =
9142                 MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
9143                 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
9144                 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
9145                 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
9146                 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
9147                 MISC_REGISTERS_RESET_REG_2_RST_GRC  |
9148                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
9149                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
9150                 MISC_REGISTERS_RESET_REG_2_RST_ATC |
9151                 MISC_REGISTERS_RESET_REG_2_PGLC |
9152                 MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
9153                 MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
9154                 MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
9155                 MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
9156                 MISC_REGISTERS_RESET_REG_2_UMAC0 |
9157                 MISC_REGISTERS_RESET_REG_2_UMAC1;
9158
9159         /*
9160          * Keep the following blocks in reset:
9161          *  - all xxMACs are handled by the bnx2x_link code.
9162          */
9163         stay_reset2 =
9164                 MISC_REGISTERS_RESET_REG_2_XMAC |
9165                 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
9166
9167         /* Full reset masks according to the chip */
9168         reset_mask1 = 0xffffffff;
9169
9170         if (CHIP_IS_E1(bp))
9171                 reset_mask2 = 0xffff;
9172         else if (CHIP_IS_E1H(bp))
9173                 reset_mask2 = 0x1ffff;
9174         else if (CHIP_IS_E2(bp))
9175                 reset_mask2 = 0xfffff;
9176         else /* CHIP_IS_E3 */
9177                 reset_mask2 = 0x3ffffff;
9178
9179         /* Don't reset global blocks unless we need to */
9180         if (!global)
9181                 reset_mask2 &= ~global_bits2;
9182
9183         /*
9184          * In case of attention in the QM, we need to reset PXP
9185          * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
9186          * because otherwise QM reset would release 'close the gates' shortly
9187          * before resetting the PXP, then the PSWRQ would send a write
9188          * request to PGLUE. Then when PXP is reset, PGLUE would try to
9189          * read the payload data from PSWWR, but PSWWR would not
9190          * respond. The write queue in PGLUE would get stuck, and DMAE commands
9191          * would not return. Therefore it's important to reset the second
9192          * reset register (containing the
9193          * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
9194          * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
9195          * bit).
9196          */
9197         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
9198                reset_mask2 & (~not_reset_mask2));
9199
9200         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
9201                reset_mask1 & (~not_reset_mask1));
9202
9203         barrier();
9204         mmiowb();
9205
9206         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
9207                reset_mask2 & (~stay_reset2));
9208
9209         barrier();
9210         mmiowb();
9211
9212         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
9213         mmiowb();
9214 }
9215
9216 /**
9217  * bnx2x_er_poll_igu_vq - poll for the pending writes bit
9218  * to clear.
9219  *
9220  * @bp: driver handle
9221  *
9222  * The bit should get cleared in no more than 1s. Returns 0 if
9223  * the pending writes bit gets cleared.
9224  */
9225 static int bnx2x_er_poll_igu_vq(struct bnx2x *bp)
9226 {
9227         int cnt = 1000; /* signed, so the timeout check below can trigger */
9228         u32 pend_bits = 0;
9229
9230         do {
9231                 pend_bits  = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS);
9232
9233                 if (pend_bits == 0)
9234                         break;
9235
9236                 usleep_range(1000, 2000);
9237         } while (cnt-- > 0);
9238
9239         if (cnt <= 0) {
9240                 BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n",
9241                           pend_bits);
9242                 return -EBUSY;
9243         }
9244
9245         return 0;
9246 }
9247
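/* "Process kill" recovery sequence, as implemented below: drain the PXP
 * Tetris buffer, close gates #2-#4, poll the IGU VQs (57712 and newer),
 * clear the "unprepared" bit, prepare the MCP (global reset only) and the
 * PXP, issue the chip reset, then restore the MCP state and reopen the
 * gates.
 */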
9248 static int bnx2x_process_kill(struct bnx2x *bp, bool global)
9249 {
9250         int cnt = 1000;
9251         u32 val = 0;
9252         u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
9253         u32 tags_63_32 = 0;
9254
9255         /* Empty the Tetris buffer, wait for 1s */
9256         do {
9257                 sr_cnt  = REG_RD(bp, PXP2_REG_RD_SR_CNT);
9258                 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
9259                 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
9260                 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
9261                 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
9262                 if (CHIP_IS_E3(bp))
9263                         tags_63_32 = REG_RD(bp, PGLUE_B_REG_TAGS_63_32);
9264
9265                 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
9266                     ((port_is_idle_0 & 0x1) == 0x1) &&
9267                     ((port_is_idle_1 & 0x1) == 0x1) &&
9268                     (pgl_exp_rom2 == 0xffffffff) &&
9269                     (!CHIP_IS_E3(bp) || (tags_63_32 == 0xffffffff)))
9270                         break;
9271                 usleep_range(1000, 2000);
9272         } while (cnt-- > 0);
9273
9274         if (cnt <= 0) {
9275                 BNX2X_ERR("Tetris buffer didn't get empty or there are still outstanding read requests after 1s!\n");
9276                 BNX2X_ERR("sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
9277                           sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
9278                           pgl_exp_rom2);
9279                 return -EAGAIN;
9280         }
9281
9282         barrier();
9283
9284         /* Close gates #2, #3 and #4 */
9285         bnx2x_set_234_gates(bp, true);
9286
9287         /* Poll for IGU VQs for 57712 and newer chips */
9288         if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp))
9289                 return -EAGAIN;
9290
9291         /* TBD: Indicate that "process kill" is in progress to MCP */
9292
9293         /* Clear "unprepared" bit */
9294         REG_WR(bp, MISC_REG_UNPREPARED, 0);
9295         barrier();
9296
9297         /* Make sure all is written to the chip before the reset */
9298         mmiowb();
9299
9300         /* Wait for 1ms to empty GLUE and PCI-E core queues,
9301          * PSWHST, GRC and PSWRD Tetris buffer.
9302          */
9303         usleep_range(1000, 2000);
9304
9305         /* Prepare to chip reset: */
9306         /* MCP */
9307         if (global)
9308                 bnx2x_reset_mcp_prep(bp, &val);
9309
9310         /* PXP */
9311         bnx2x_pxp_prep(bp);
9312         barrier();
9313
9314         /* reset the chip */
9315         bnx2x_process_kill_chip_reset(bp, global);
9316         barrier();
9317
9318         /* Recover after reset: */
9319         /* MCP */
9320         if (global && bnx2x_reset_mcp_comp(bp, val))
9321                 return -EAGAIN;
9322
9323         /* TBD: Add resetting the NO_MCP mode DB here */
9324
9325         /* Open the gates #2, #3 and #4 */
9326         bnx2x_set_234_gates(bp, false);
9327
9328         /* TBD: IGU/AEU preparation: bring back the AEU/IGU to a
9329          * reset state, re-enable attentions. */
9330
9331         return 0;
9332 }
9333
9334 static int bnx2x_leader_reset(struct bnx2x *bp)
9335 {
9336         int rc = 0;
9337         bool global = bnx2x_reset_is_global(bp);
9338         u32 load_code;
9339
9340         /* If we are not going to reset the MCP, load a "fake" driver to
9341          * reset the HW while the driver still owns it
9342          */
9343         if (!global && !BP_NOMCP(bp)) {
9344                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
9345                                              DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
9346                 if (!load_code) {
9347                         BNX2X_ERR("MCP response failure, aborting\n");
9348                         rc = -EAGAIN;
9349                         goto exit_leader_reset;
9350                 }
9351                 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
9352                     (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
9353                         BNX2X_ERR("MCP unexpected resp, aborting\n");
9354                         rc = -EAGAIN;
9355                         goto exit_leader_reset2;
9356                 }
9357                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9358                 if (!load_code) {
9359                         BNX2X_ERR("MCP response failure, aborting\n");
9360                         rc = -EAGAIN;
9361                         goto exit_leader_reset2;
9362                 }
9363         }
9364
9365         /* Try to recover after the failure */
9366         if (bnx2x_process_kill(bp, global)) {
9367                 BNX2X_ERR("Something bad happened on engine %d! Aii!\n",
9368                           BP_PATH(bp));
9369                 rc = -EAGAIN;
9370                 goto exit_leader_reset2;
9371         }
9372
9373         /*
9374          * Clear RESET_IN_PROGRES and RESET_GLOBAL bits and update the driver
9375          * state.
9376          */
9377         bnx2x_set_reset_done(bp);
9378         if (global)
9379                 bnx2x_clear_reset_global(bp);
9380
9381 exit_leader_reset2:
9382         /* unload "fake driver" if it was loaded */
9383         if (!global && !BP_NOMCP(bp)) {
9384                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
9385                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
9386         }
9387 exit_leader_reset:
9388         bp->is_leader = 0;
9389         bnx2x_release_leader_lock(bp);
9390         smp_mb();
9391         return rc;
9392 }
9393
9394 static void bnx2x_recovery_failed(struct bnx2x *bp)
9395 {
9396         netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n");
9397
9398         /* Disconnect this device */
9399         netif_device_detach(bp->dev);
9400
9401         /*
9402          * Block ifup for all functions on this engine until "process kill"
9403          * or power cycle.
9404          */
9405         bnx2x_set_reset_in_progress(bp);
9406
9407         /* Shut down the power */
9408         bnx2x_set_power_state(bp, PCI_D3hot);
9409
9410         bp->recovery_state = BNX2X_RECOVERY_FAILED;
9411
9412         smp_mb();
9413 }
9414
9415 /*
9416  * Assumption: runs under rtnl lock. This together with the fact
9417  * that it's called only from bnx2x_sp_rtnl() ensures that it
9418  * will never be called when netif_running(bp->dev) is false.
9419  */
9420 static void bnx2x_parity_recover(struct bnx2x *bp)
9421 {
9422         bool global = false;
9423         u32 error_recovered, error_unrecovered;
9424         bool is_parity;
9425
9426         DP(NETIF_MSG_HW, "Handling parity\n");
9427         while (1) {
9428                 switch (bp->recovery_state) {
9429                 case BNX2X_RECOVERY_INIT:
9430                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
9431                         is_parity = bnx2x_chk_parity_attn(bp, &global, false);
9432                         WARN_ON(!is_parity);
9433
9434                         /* Try to get a LEADER_LOCK HW lock */
9435                         if (bnx2x_trylock_leader_lock(bp)) {
9436                                 bnx2x_set_reset_in_progress(bp);
9437                                 /*
9438                                  * Check if there was a global attention
9439                                  * and, if so, set the global
9440                                  * reset bit.
9441                                  */
9442
9443                                 if (global)
9444                                         bnx2x_set_reset_global(bp);
9445
9446                                 bp->is_leader = 1;
9447                         }
9448
9449                         /* Stop the driver */
9450                         /* If interface has been removed - break */
9451                         if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY, false))
9452                                 return;
9453
9454                         bp->recovery_state = BNX2X_RECOVERY_WAIT;
9455
9456                         /* Ensure "is_leader", MCP command sequence and
9457                          * "recovery_state" update values are seen on other
9458                          * CPUs.
9459                          */
9460                         smp_mb();
9461                         break;
9462
9463                 case BNX2X_RECOVERY_WAIT:
9464                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
9465                         if (bp->is_leader) {
9466                                 int other_engine = BP_PATH(bp) ? 0 : 1;
9467                                 bool other_load_status =
9468                                         bnx2x_get_load_status(bp, other_engine);
9469                                 bool load_status =
9470                                         bnx2x_get_load_status(bp, BP_PATH(bp));
9471                                 global = bnx2x_reset_is_global(bp);
9472
9473                                 /*
9474                                  * In case of a parity in a global block, let
9475                                  * the first leader that performs a
9476                                  * leader_reset() reset the global blocks in
9477                                  * order to clear global attentions. Otherwise
9478                                  * the gates will remain closed for that
9479                                  * engine.
9480                                  */
9481                                 if (load_status ||
9482                                     (global && other_load_status)) {
9483                                         /* Wait until all other functions are
9484                                          * down.
9485                                          */
9486                                         schedule_delayed_work(&bp->sp_rtnl_task,
9487                                                                 HZ/10);
9488                                         return;
9489                                 } else {
9490                                         /* If all other functions are down,
9491                                          * try to bring the chip back to
9492                                          * normal. In any case it's an exit
9493                                          * point for a leader.
9494                                          */
9495                                         if (bnx2x_leader_reset(bp)) {
9496                                                 bnx2x_recovery_failed(bp);
9497                                                 return;
9498                                         }
9499
9500                                         /* If we are here, it means that the
9501                                          * leader has succeeded and doesn't
9502                                          * want to be a leader any more. Try
9503                                          * to continue as a non-leader.
9504                                          */
9505                                         break;
9506                                 }
9507                         } else { /* non-leader */
9508                                 if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) {
9509                                         /* Try to get a LEADER_LOCK HW lock,
9510                                          * as a former leader may have
9511                                          * been unloaded by the user or
9512                                          * released leadership for another
9513                                          * reason.
9514                                          */
9515                                         if (bnx2x_trylock_leader_lock(bp)) {
9516                                                 /* I'm a leader now! Restart a
9517                                                  * switch case.
9518                                                  */
9519                                                 bp->is_leader = 1;
9520                                                 break;
9521                                         }
9522
9523                                         schedule_delayed_work(&bp->sp_rtnl_task,
9524                                                                 HZ/10);
9525                                         return;
9526
9527                                 } else {
9528                                         /*
9529                                          * If there was a global attention, wait
9530                                          * for it to be cleared.
9531                                          */
9532                                         if (bnx2x_reset_is_global(bp)) {
9533                                                 schedule_delayed_work(
9534                                                         &bp->sp_rtnl_task,
9535                                                         HZ/10);
9536                                                 return;
9537                                         }
9538
9539                                         error_recovered =
9540                                           bp->eth_stats.recoverable_error;
9541                                         error_unrecovered =
9542                                           bp->eth_stats.unrecoverable_error;
9543                                         bp->recovery_state =
9544                                                 BNX2X_RECOVERY_NIC_LOADING;
9545                                         if (bnx2x_nic_load(bp, LOAD_NORMAL)) {
9546                                                 error_unrecovered++;
9547                                                 netdev_err(bp->dev,
9548                                                            "Recovery failed. Power cycle needed\n");
9549                                                 /* Disconnect this device */
9550                                                 netif_device_detach(bp->dev);
9551                                                 /* Shut down the power */
9552                                                 bnx2x_set_power_state(
9553                                                         bp, PCI_D3hot);
9554                                                 smp_mb();
9555                                         } else {
9556                                                 bp->recovery_state =
9557                                                         BNX2X_RECOVERY_DONE;
9558                                                 error_recovered++;
9559                                                 smp_mb();
9560                                         }
9561                                         bp->eth_stats.recoverable_error =
9562                                                 error_recovered;
9563                                         bp->eth_stats.unrecoverable_error =
9564                                                 error_unrecovered;
9565
9566                                         return;
9567                                 }
9568                         }
9569                 default:
9570                         return;
9571                 }
9572         }
9573 }
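/* Editorial summary of the recovery state machine above (a reading aid,
 * not part of the original source):
 *
 *   BNX2X_RECOVERY_INIT - grab leadership if possible, unload the NIC and
 *       move to WAIT.
 *   BNX2X_RECOVERY_WAIT - leader: once all other functions are down, run
 *       bnx2x_leader_reset(); non-leader: try to inherit leadership, else
 *       wait for the reset and any global attention to clear, then reload.
 *   BNX2X_RECOVERY_NIC_LOADING - transient state around bnx2x_nic_load().
 *   BNX2X_RECOVERY_DONE / BNX2X_RECOVERY_FAILED - terminal states; FAILED
 *       detaches the netdev and powers the device down (PCI_D3hot) until a
 *       power cycle.
 */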
9574
9575 static int bnx2x_close(struct net_device *dev);
9576
9577 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
9578  * scheduled on a general queue in order to prevent a deadlock.
9579  */
9580 static void bnx2x_sp_rtnl_task(struct work_struct *work)
9581 {
9582         struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);
9583
9584         rtnl_lock();
9585
9586         if (!netif_running(bp->dev)) {
9587                 rtnl_unlock();
9588                 return;
9589         }
9590
9591         if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) {
9592 #ifdef BNX2X_STOP_ON_ERROR
9593                 BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
9594                           "you will need to reboot when done\n");
9595                 goto sp_rtnl_not_reset;
9596 #endif
9597                 /*
9598                  * Clear all pending SP commands as we are going to reset the
9599                  * function anyway.
9600                  */
9601                 bp->sp_rtnl_state = 0;
9602                 smp_mb();
9603
9604                 bnx2x_parity_recover(bp);
9605
9606                 rtnl_unlock();
9607                 return;
9608         }
9609
9610         if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
9611 #ifdef BNX2X_STOP_ON_ERROR
9612                 BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
9613                           "you will need to reboot when done\n");
9614                 goto sp_rtnl_not_reset;
9615 #endif
9616
9617                 /*
9618                  * Clear all pending SP commands as we are going to reset the
9619                  * function anyway.
9620                  */
9621                 bp->sp_rtnl_state = 0;
9622                 smp_mb();
9623
9624                 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
9625                 bnx2x_nic_load(bp, LOAD_NORMAL);
9626
9627                 rtnl_unlock();
9628                 return;
9629         }
9630 #ifdef BNX2X_STOP_ON_ERROR
9631 sp_rtnl_not_reset:
9632 #endif
9633         if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
9634                 bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
9635         if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state))
9636                 bnx2x_after_function_update(bp);
9637         /*
9638          * in case of fan failure we unload the driver even if the "stop on
9639          * error" debug flag is set, since we are trying to prevent permanent
9640          * overheating damage
9641          */
9642         if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) {
9643                 DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n");
9644                 netif_device_detach(bp->dev);
9645                 bnx2x_close(bp->dev);
9646                 rtnl_unlock();
9647                 return;
9648         }
9649
9650         if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_MCAST, &bp->sp_rtnl_state)) {
9651                 DP(BNX2X_MSG_SP,
9652                    "sending set mcast vf pf channel message from rtnl sp-task\n");
9653                 bnx2x_vfpf_set_mcast(bp->dev);
9654         }
9655         if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
9656                                &bp->sp_rtnl_state)){
9657                 if (!test_bit(__LINK_STATE_NOCARRIER, &bp->dev->state)) {
9658                         bnx2x_tx_disable(bp);
9659                         BNX2X_ERR("PF indicated channel is not serviceable anymore. This means this VF device is no longer operational\n");
9660                 }
9661         }
9662
9663         if (test_and_clear_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state)) {
9664                 DP(BNX2X_MSG_SP, "Handling Rx Mode setting\n");
9665                 bnx2x_set_rx_mode_inner(bp);
9666         }
9667
9668         if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
9669                                &bp->sp_rtnl_state))
9670                 bnx2x_pf_set_vfs_vlan(bp);
9671
9672         if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state))
9673                 bnx2x_dcbx_stop_hw_tx(bp);
9674
9675         if (test_and_clear_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state))
9676                 bnx2x_dcbx_resume_hw_tx(bp);
9677
9678         /* work which needs the rtnl lock not taken (it takes the lock itself
9679          * and can be called from other contexts as well)
9680          */
9681         rtnl_unlock();
9682
9683         /* enable SR-IOV if applicable */
9684         if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV,
9685                                                &bp->sp_rtnl_state)) {
9686                 bnx2x_disable_sriov(bp);
9687                 bnx2x_enable_sriov(bp);
9688         }
9689 }
9690
9691 static void bnx2x_period_task(struct work_struct *work)
9692 {
9693         struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work);
9694
9695         if (!netif_running(bp->dev))
9696                 goto period_task_exit;
9697
9698         if (CHIP_REV_IS_SLOW(bp)) {
9699                 BNX2X_ERR("period task called on emulation, ignoring\n");
9700                 goto period_task_exit;
9701         }
9702
9703         bnx2x_acquire_phy_lock(bp);
9704         /*
9705          * The barrier is needed to ensure the ordering between the write to
9706          * bp->port.pmf in bnx2x_nic_load() or bnx2x_pmf_update() and
9707          * the read here.
9708          */
9709         smp_mb();
9710         if (bp->port.pmf) {
9711                 bnx2x_period_func(&bp->link_params, &bp->link_vars);
9712
9713                 /* Re-queue task in 1 sec */
9714                 queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ);
9715         }
9716
9717         bnx2x_release_phy_lock(bp);
9718 period_task_exit:
9719         return;
9720 }
9721
9722 /*
9723  * Init service functions
9724  */
9725
9726 u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
9727 {
9728         u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
9729         u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
9730         return base + (BP_ABS_FUNC(bp)) * stride;
9731 }
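/* Worked example (editorial note): the per-function pretend registers sit
 * at a fixed stride, so for absolute function 3 this evaluates to
 *
 *   PXP2_REG_PGL_PRETEND_FUNC_F0
 *       + 3 * (PXP2_REG_PGL_PRETEND_FUNC_F1 - PXP2_REG_PGL_PRETEND_FUNC_F0)
 *
 * i.e. the pretend register of function 3, without having to name each
 * function's register explicitly.
 */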
9732
9733 static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
9734                                         struct bnx2x_mac_vals *vals)
9735 {
9736         u32 val, base_addr, offset, mask, reset_reg;
9737         bool mac_stopped = false;
9738         u8 port = BP_PORT(bp);
9739
9740         /* reset addresses as they also mark which values were changed */
9741         vals->bmac_addr = 0;
9742         vals->umac_addr = 0;
9743         vals->xmac_addr = 0;
9744         vals->emac_addr = 0;
9745
9746         reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);
9747
9748         if (!CHIP_IS_E3(bp)) {
9749                 val = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
9750                 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
9751                 if ((mask & reset_reg) && val) {
9752                         u32 wb_data[2];
9753                         BNX2X_DEV_INFO("Disable bmac Rx\n");
9754                         base_addr = BP_PORT(bp) ? NIG_REG_INGRESS_BMAC1_MEM
9755                                                 : NIG_REG_INGRESS_BMAC0_MEM;
9756                         offset = CHIP_IS_E2(bp) ? BIGMAC2_REGISTER_BMAC_CONTROL
9757                                                 : BIGMAC_REGISTER_BMAC_CONTROL;
9758
9759                         /*
9760                          * use rd/wr since we cannot use dmae. This is safe
9761                          * since MCP won't access the bus due to the request
9762                          * to unload, and no function on the path can be
9763                          * loaded at this time.
9764                          */
9765                         wb_data[0] = REG_RD(bp, base_addr + offset);
9766                         wb_data[1] = REG_RD(bp, base_addr + offset + 0x4);
9767                         vals->bmac_addr = base_addr + offset;
9768                         vals->bmac_val[0] = wb_data[0];
9769                         vals->bmac_val[1] = wb_data[1];
9770                         wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
9771                         REG_WR(bp, vals->bmac_addr, wb_data[0]);
9772                         REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]);
9773                 }
9774                 BNX2X_DEV_INFO("Disable emac Rx\n");
9775                 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4;
9776                 vals->emac_val = REG_RD(bp, vals->emac_addr);
9777                 REG_WR(bp, vals->emac_addr, 0);
9778                 mac_stopped = true;
9779         } else {
9780                 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
9781                         BNX2X_DEV_INFO("Disable xmac Rx\n");
9782                         base_addr = BP_PORT(bp) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
9783                         val = REG_RD(bp, base_addr + XMAC_REG_PFC_CTRL_HI);
9784                         REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
9785                                val & ~(1 << 1));
9786                         REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
9787                                val | (1 << 1));
9788                         vals->xmac_addr = base_addr + XMAC_REG_CTRL;
9789                         vals->xmac_val = REG_RD(bp, vals->xmac_addr);
9790                         REG_WR(bp, vals->xmac_addr, 0);
9791                         mac_stopped = true;
9792                 }
9793                 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
9794                 if (mask & reset_reg) {
9795                         BNX2X_DEV_INFO("Disable umac Rx\n");
9796                         base_addr = BP_PORT(bp) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
9797                         vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
9798                         vals->umac_val = REG_RD(bp, vals->umac_addr);
9799                         REG_WR(bp, vals->umac_addr, 0);
9800                         mac_stopped = true;
9801                 }
9802         }
9803
9804         if (mac_stopped)
9805                 msleep(20);
9806 }
9807
9808 #define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
9809 #define BNX2X_PREV_UNDI_RCQ(val)        ((val) & 0xffff)
9810 #define BNX2X_PREV_UNDI_BD(val)         ((val) >> 16 & 0xffff)
9811 #define BNX2X_PREV_UNDI_PROD(rcq, bd)   ((bd) << 16 | (rcq))
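/* Worked example of the packing above (editorial note): the UNDI producer
 * word keeps the BD producer in the high 16 bits and the RCQ producer in
 * the low 16 bits. For tmp_reg == 0x00050003:
 *
 *   BNX2X_PREV_UNDI_RCQ(0x00050003)      == 0x0003
 *   BNX2X_PREV_UNDI_BD(0x00050003)       == 0x0005
 *   BNX2X_PREV_UNDI_PROD(0x0004, 0x0006) == 0x00060004
 *
 * so bnx2x_prev_unload_undi_inc(bp, port, 1) rewrites 0x00050003 as
 * 0x00060004.
 */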
9812
9813 static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port, u8 inc)
9814 {
9815         u16 rcq, bd;
9816         u32 tmp_reg = REG_RD(bp, BNX2X_PREV_UNDI_PROD_ADDR(port));
9817
9818         rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc;
9819         bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc;
9820
9821         tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd);
9822         REG_WR(bp, BNX2X_PREV_UNDI_PROD_ADDR(port), tmp_reg);
9823
9824         BNX2X_DEV_INFO("UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
9825                        port, bd, rcq);
9826 }
9827
9828 static int bnx2x_prev_mcp_done(struct bnx2x *bp)
9829 {
9830         u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE,
9831                                   DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
9832         if (!rc) {
9833                 BNX2X_ERR("MCP response failure, aborting\n");
9834                 return -EBUSY;
9835         }
9836
9837         return 0;
9838 }
9839
9840 static struct bnx2x_prev_path_list *
9841                 bnx2x_prev_path_get_entry(struct bnx2x *bp)
9842 {
9843         struct bnx2x_prev_path_list *tmp_list;
9844
9845         list_for_each_entry(tmp_list, &bnx2x_prev_list, list)
9846                 if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot &&
9847                     bp->pdev->bus->number == tmp_list->bus &&
9848                     BP_PATH(bp) == tmp_list->path)
9849                         return tmp_list;
9850
9851         return NULL;
9852 }
9853
9854 static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp)
9855 {
9856         struct bnx2x_prev_path_list *tmp_list;
9857         int rc;
9858
9859         rc = down_interruptible(&bnx2x_prev_sem);
9860         if (rc) {
9861                 BNX2X_ERR("Received %d when trying to take lock\n", rc);
9862                 return rc;
9863         }
9864
9865         tmp_list = bnx2x_prev_path_get_entry(bp);
9866         if (tmp_list) {
9867                 tmp_list->aer = 1;
9868                 rc = 0;
9869         } else {
9870                 BNX2X_ERR("path %d: Entry does not exist for EEH; flow occurs before initial insmod is over?\n",
9871                           BP_PATH(bp));
9872         }
9873
9874         up(&bnx2x_prev_sem);
9875
9876         return rc;
9877 }
9878
9879 static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
9880 {
9881         struct bnx2x_prev_path_list *tmp_list;
9882         int rc = false;
9883
9884         if (down_trylock(&bnx2x_prev_sem))
9885                 return false;
9886
9887         tmp_list = bnx2x_prev_path_get_entry(bp);
9888         if (tmp_list) {
9889                 if (tmp_list->aer) {
9890                         DP(NETIF_MSG_HW, "Path %d was marked by AER\n",
9891                            BP_PATH(bp));
9892                 } else {
9893                         rc = true;
9894                         BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n",
9895                                        BP_PATH(bp));
9896                 }
9897         }
9898
9899         up(&bnx2x_prev_sem);
9900
9901         return rc;
9902 }
9903
9904 bool bnx2x_port_after_undi(struct bnx2x *bp)
9905 {
9906         struct bnx2x_prev_path_list *entry;
9907         bool val;
9908
9909         down(&bnx2x_prev_sem);
9910
9911         entry = bnx2x_prev_path_get_entry(bp);
9912         val = !!(entry && (entry->undi & (1 << BP_PORT(bp))));
9913
9914         up(&bnx2x_prev_sem);
9915
9916         return val;
9917 }
9918
9919 static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
9920 {
9921         struct bnx2x_prev_path_list *tmp_list;
9922         int rc;
9923
9924         rc = down_interruptible(&bnx2x_prev_sem);
9925         if (rc) {
9926                 BNX2X_ERR("Received %d when trying to take lock\n", rc);
9927                 return rc;
9928         }
9929
9930         /* Check whether the entry for this path already exists */
9931         tmp_list = bnx2x_prev_path_get_entry(bp);
9932         if (tmp_list) {
9933                 if (!tmp_list->aer) {
9934                         BNX2X_ERR("Re-Marking the path.\n");
9935                 } else {
9936                         DP(NETIF_MSG_HW, "Removing AER indication from path %d\n",
9937                            BP_PATH(bp));
9938                         tmp_list->aer = 0;
9939                 }
9940                 up(&bnx2x_prev_sem);
9941                 return 0;
9942         }
9943         up(&bnx2x_prev_sem);
9944
9945         /* Create an entry for this path and add it */
9946         tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL);
9947         if (!tmp_list) {
9948                 BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n");
9949                 return -ENOMEM;
9950         }
9951
9952         tmp_list->bus = bp->pdev->bus->number;
9953         tmp_list->slot = PCI_SLOT(bp->pdev->devfn);
9954         tmp_list->path = BP_PATH(bp);
9955         tmp_list->aer = 0;
9956         tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0;
9957
9958         rc = down_interruptible(&bnx2x_prev_sem);
9959         if (rc) {
9960                 BNX2X_ERR("Received %d when trying to take lock\n", rc);
9961                 kfree(tmp_list);
9962         } else {
9963                 DP(NETIF_MSG_HW, "Marked path [%d] - finished previous unload\n",
9964                    BP_PATH(bp));
9965                 list_add(&tmp_list->list, &bnx2x_prev_list);
9966                 up(&bnx2x_prev_sem);
9967         }
9968
9969         return rc;
9970 }
9971
9972 static int bnx2x_do_flr(struct bnx2x *bp)
9973 {
9974         struct pci_dev *dev = bp->pdev;
9975
9976         if (CHIP_IS_E1x(bp)) {
9977                 BNX2X_DEV_INFO("FLR not supported in E1/E1H\n");
9978                 return -EINVAL;
9979         }
9980
9981         /* only bootcode versions REQ_BC_VER_4_INITIATE_FLR and onwards support FLR */
9982         if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
9983                 BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n",
9984                           bp->common.bc_ver);
9985                 return -EINVAL;
9986         }
9987
9988         if (!pci_wait_for_pending_transaction(dev))
9989                 dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
9990
9991         BNX2X_DEV_INFO("Initiating FLR\n");
9992         bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0);
9993
9994         return 0;
9995 }
9996
9997 static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
9998 {
9999         int rc;
10000
10001         BNX2X_DEV_INFO("Uncommon unload Flow\n");
10002
10003         /* Test if previous unload process was already finished for this path */
10004         if (bnx2x_prev_is_path_marked(bp))
10005                 return bnx2x_prev_mcp_done(bp);
10006
10007         BNX2X_DEV_INFO("Path is unmarked\n");
10008
10009         /* If the function has FLR capabilities and the existing FW version
10010          * matches the one required, then FLR will be sufficient to clean any
10011          * residue left by the previous driver
10012          */
10013         rc = bnx2x_nic_load_analyze_req(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION);
10014
10015         if (!rc) {
10016                 /* fw version is good */
10017                 BNX2X_DEV_INFO("FW version matches our own. Attempting FLR\n");
10018                 rc = bnx2x_do_flr(bp);
10019         }
10020
10021         if (!rc) {
10022                 /* FLR was performed */
10023                 BNX2X_DEV_INFO("FLR successful\n");
10024                 return 0;
10025         }
10026
10027         BNX2X_DEV_INFO("Could not FLR\n");
10028
10029         /* Close the MCP request, return failure */
10030         rc = bnx2x_prev_mcp_done(bp);
10031         if (!rc)
10032                 rc = BNX2X_PREV_WAIT_NEEDED;
10033
10034         return rc;
10035 }
10036
10037 static int bnx2x_prev_unload_common(struct bnx2x *bp)
10038 {
10039         u32 reset_reg, tmp_reg = 0, rc;
10040         bool prev_undi = false;
10041         struct bnx2x_mac_vals mac_vals;
10042
10043         /* It is possible a previous function received a 'common' answer,
10044          * but hasn't loaded yet, creating a scenario of
10045          * multiple functions receiving 'common' on the same path.
10046          */
10047         BNX2X_DEV_INFO("Common unload Flow\n");
10048
10049         memset(&mac_vals, 0, sizeof(mac_vals));
10050
10051         if (bnx2x_prev_is_path_marked(bp))
10052                 return bnx2x_prev_mcp_done(bp);
10053
10054         reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);
10055
10056         /* Reset should be performed after BRB is emptied */
10057         if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
10058                 u32 timer_count = 1000;
10059
10060                 /* Close the MAC Rx to prevent BRB from filling up */
10061                 bnx2x_prev_unload_close_mac(bp, &mac_vals);
10062
10063                 /* close LLH filters towards the BRB */
10064                 bnx2x_set_rx_filter(&bp->link_params, 0);
10065
10066                 /* Check if the UNDI driver was previously loaded; the UNDI
10067                  * driver initializes the CID offset for the normal doorbell to 0x7
10068                  */
10069                 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
10070                         tmp_reg = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
10071                         if (tmp_reg == 0x7) {
10072                                 BNX2X_DEV_INFO("UNDI previously loaded\n");
10073                                 prev_undi = true;
10074                                 /* clear the UNDI indication */
10075                                 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
10076                                 /* clear possible idle check errors */
10077                                 REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
10078                         }
10079                 }
10080                 if (!CHIP_IS_E1x(bp))
10081                         /* block FW from writing to host */
10082                         REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
10083
10084                 /* wait until BRB is empty */
10085                 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
10086                 while (timer_count) {
10087                         u32 prev_brb = tmp_reg;
10088
10089                         tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
10090                         if (!tmp_reg)
10091                                 break;
10092
10093                         BNX2X_DEV_INFO("BRB still has 0x%08x\n", tmp_reg);
10094
10095                         /* reset timer as long as BRB actually gets emptied */
10096                         if (prev_brb > tmp_reg)
10097                                 timer_count = 1000;
10098                         else
10099                                 timer_count--;
10100
10101                         /* If UNDI resides in memory, manually increment it */
10102                         if (prev_undi)
10103                                 bnx2x_prev_unload_undi_inc(bp, BP_PORT(bp), 1);
10104
10105                         udelay(10);
10106                 }
10107
10108                 if (!timer_count)
10109                         BNX2X_ERR("Failed to empty BRB, hope for the best\n");
10110         }
10111
10112         /* No packets are in the pipeline, path is ready for reset */
10113         bnx2x_reset_common(bp);
10114
10115         if (mac_vals.xmac_addr)
10116                 REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val);
10117         if (mac_vals.umac_addr)
10118                 REG_WR(bp, mac_vals.umac_addr, mac_vals.umac_val);
10119         if (mac_vals.emac_addr)
10120                 REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val);
10121         if (mac_vals.bmac_addr) {
10122                 REG_WR(bp, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
10123                 REG_WR(bp, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
10124         }
10125
10126         rc = bnx2x_prev_mark_path(bp, prev_undi);
10127         if (rc) {
10128                 bnx2x_prev_mcp_done(bp);
10129                 return rc;
10130         }
10131
10132         return bnx2x_prev_mcp_done(bp);
10133 }
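/* Editorial sketch (the helper below is hypothetical, not part of the
 * driver) of the drain-with-watchdog pattern used in the BRB-empty loop
 * above: the countdown is re-armed whenever the level is seen to drop, so
 * the loop only gives up after 1000 consecutive polls without progress.
 */
static int __maybe_unused drain_with_watchdog(u32 (*read_level)(void))
{
        int timer_count = 1000;
        u32 level = read_level();

        while (timer_count && level) {
                u32 prev = level;

                udelay(10);
                level = read_level();
                if (level < prev)
                        timer_count = 1000;     /* progress - re-arm */
                else
                        timer_count--;          /* stalled - count down */
        }

        return level ? -ETIMEDOUT : 0;
}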
10134
10135 /* A previous driver's DMAE transaction may have occurred when the pre-boot
10136  * stage ended and boot began, or when a kdump kernel was loaded. Either case
10137  * would invalidate the addresses of the transaction, resulting in the
10138  * was-error bit being set in the PCI and causing all hw-to-host PCIe
10139  * transactions to time out. If this happened, we want to clear the interrupt
10140  * which detected this from the pglueb, and the was-done bit.
10141  */
10142 static void bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
10143 {
10144         if (!CHIP_IS_E1x(bp)) {
10145                 u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
10146                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
10147                         DP(BNX2X_MSG_SP,
10148                            "'was error' bit was found to be set in pglueb upon startup. Clearing\n");
10149                         REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
10150                                1 << BP_FUNC(bp));
10151                 }
10152         }
10153 }
10154
10155 static int bnx2x_prev_unload(struct bnx2x *bp)
10156 {
10157         int time_counter = 10;
10158         u32 rc, fw, hw_lock_reg, hw_lock_val;
10159         BNX2X_DEV_INFO("Entering Previous Unload Flow\n");
10160
10161         /* clear hw from errors which may have resulted from an interrupted
10162          * dmae transaction.
10163          */
10164         bnx2x_prev_interrupted_dmae(bp);
10165
10166         /* Release previously held locks */
10167         hw_lock_reg = (BP_FUNC(bp) <= 5) ?
10168                       (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) :
10169                       (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8);
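        /* Worked example (editorial note): the per-function lock registers
         * are split across two banks, so function 2 maps to
         * MISC_REG_DRIVER_CONTROL_1 + 2 * 8 while function 7 maps to
         * MISC_REG_DRIVER_CONTROL_7 + (7 - 6) * 8.
         */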
10170
10171         hw_lock_val = REG_RD(bp, hw_lock_reg);
10172         if (hw_lock_val) {
10173                 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
10174                         BNX2X_DEV_INFO("Release Previously held NVRAM lock\n");
10175                         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10176                                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << BP_PORT(bp)));
10177                 }
10178
10179                 BNX2X_DEV_INFO("Release Previously held hw lock\n");
10180                 REG_WR(bp, hw_lock_reg, 0xffffffff);
10181         } else
10182                 BNX2X_DEV_INFO("No need to release hw/nvram locks\n");
10183
10184         if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) {
10185                 BNX2X_DEV_INFO("Release previously held alr\n");
10186                 bnx2x_release_alr(bp);
10187         }
10188
10189         do {
10190                 int aer = 0;
10191                 /* Lock MCP using an unload request */
10192                 fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
10193                 if (!fw) {
10194                         BNX2X_ERR("MCP response failure, aborting\n");
10195                         rc = -EBUSY;
10196                         break;
10197                 }
10198
10199                 rc = down_interruptible(&bnx2x_prev_sem);
10200                 if (rc) {
10201                         BNX2X_ERR("Cannot check for AER; Received %d when trying to take lock\n",
10202                                   rc);
10203                 } else {
10204                         /* If Path is marked by EEH, ignore unload status */
10205                         aer = !!(bnx2x_prev_path_get_entry(bp) &&
10206                                  bnx2x_prev_path_get_entry(bp)->aer);
10207                         up(&bnx2x_prev_sem);
10208                 }
10209
10210                 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON || aer) {
10211                         rc = bnx2x_prev_unload_common(bp);
10212                         break;
10213                 }
10214
10215                 /* non-common reply from MCP might require looping */
10216                 rc = bnx2x_prev_unload_uncommon(bp);
10217                 if (rc != BNX2X_PREV_WAIT_NEEDED)
10218                         break;
10219
10220                 msleep(20);
10221         } while (--time_counter);
10222
10223         if (!time_counter || rc) {
10224                 BNX2X_ERR("Failed unloading previous driver, aborting\n");
10225                 rc = -EBUSY;
10226         }
10227
10228         /* Mark function if its port was used to boot from SAN */
10229         if (bnx2x_port_after_undi(bp))
10230                 bp->link_params.feature_config_flags |=
10231                         FEATURE_CONFIG_BOOT_FROM_SAN;
10232
10233         BNX2X_DEV_INFO("Finished Previous Unload Flow [%d]\n", rc);
10234
10235         return rc;
10236 }
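/* Editorial summary of the retry protocol above: each of up to ten
 * iterations locks the MCP with an unload request; a 'common' reply (or an
 * AER-marked path) takes the full common cleanup path, while an 'uncommon'
 * reply may ask for another 20ms-spaced attempt via BNX2X_PREV_WAIT_NEEDED.
 */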
10237
10238 static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
10239 {
10240         u32 val, val2, val3, val4, id, boot_mode;
10241         u16 pmc;
10242
10243         /* Get the chip revision id and number. */
10244         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
10245         val = REG_RD(bp, MISC_REG_CHIP_NUM);
10246         id = ((val & 0xffff) << 16);
10247         val = REG_RD(bp, MISC_REG_CHIP_REV);
10248         id |= ((val & 0xf) << 12);
10249
10250         /* Metal is read from PCI regs, but we can't access >=0x400 from
10251          * the configuration space (so we need to use REG_RD)
10252          */
10253         val = REG_RD(bp, PCICFG_OFFSET + PCI_ID_VAL3);
10254         id |= (((val >> 24) & 0xf) << 4);
10255         val = REG_RD(bp, MISC_REG_BOND_ID);
10256         id |= (val & 0xf);
10257         bp->common.chip_id = id;
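        /* Worked example of the id composition above (editorial note; the
         * chip number is illustrative): with chip num 0x168e, rev 0x0,
         * metal 0x1 and bond_id 0x0 the fields assemble as
         *
         *   id = (0x168e << 16) | (0x0 << 12) | (0x1 << 4) | 0x0 = 0x168e0010
         *
         * matching the "chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3"
         * layout noted above.
         */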
10258
10259         /* force 57811 according to MISC register */
10260         if (REG_RD(bp, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
10261                 if (CHIP_IS_57810(bp))
10262                         bp->common.chip_id = (CHIP_NUM_57811 << 16) |
10263                                 (bp->common.chip_id & 0x0000FFFF);
10264                 else if (CHIP_IS_57810_MF(bp))
10265                         bp->common.chip_id = (CHIP_NUM_57811_MF << 16) |
10266                                 (bp->common.chip_id & 0x0000FFFF);
10267                 bp->common.chip_id |= 0x1;
10268         }
10269
10270         /* Set doorbell size */
10271         bp->db_size = (1 << BNX2X_DB_SHIFT);
10272
10273         if (!CHIP_IS_E1x(bp)) {
10274                 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
10275                 if ((val & 1) == 0)
10276                         val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
10277                 else
10278                         val = (val >> 1) & 1;
10279                 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
10280                                                        "2_PORT_MODE");
10281                 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
10282                                                  CHIP_2_PORT_MODE;
10283
10284                 if (CHIP_MODE_IS_4_PORT(bp))
10285                         bp->pfid = (bp->pf_num >> 1);   /* 0..3 */
10286                 else
10287                         bp->pfid = (bp->pf_num & 0x6);  /* 0, 2, 4, 6 */
10288         } else {
10289                 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
10290                 bp->pfid = bp->pf_num;                  /* 0..7 */
10291         }
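        /* Worked examples (editorial note): in 4-port mode pf_num 5 gives
         * pfid = 5 >> 1 = 2; in 2-port mode pf_num 5 gives
         * pfid = 5 & 0x6 = 4; on E1x the pfid is simply pf_num.
         */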
10292
10293         BNX2X_DEV_INFO("pf_id: %x\n", bp->pfid);
10294
10295         bp->link_params.chip_id = bp->common.chip_id;
10296         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
10297
10298         val = (REG_RD(bp, 0x2874) & 0x55);
10299         if ((bp->common.chip_id & 0x1) ||
10300             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
10301                 bp->flags |= ONE_PORT_FLAG;
10302                 BNX2X_DEV_INFO("single port device\n");
10303         }
10304
10305         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
10306         bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE <<
10307                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
10308         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
10309                        bp->common.flash_size, bp->common.flash_size);
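        /* Worked example (editorial note, assuming BNX2X_NVRAM_1MB_SIZE
         * represents one megabyte): a CFG4 flash-size field of 2 yields
         * flash_size = 1MB << 2 = 4MB.
         */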
10310
10311         bnx2x_init_shmem(bp);
10312
10313         bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
10314                                         MISC_REG_GENERIC_CR_1 :
10315                                         MISC_REG_GENERIC_CR_0));
10316
10317         bp->link_params.shmem_base = bp->common.shmem_base;
10318         bp->link_params.shmem2_base = bp->common.shmem2_base;
10319         if (SHMEM2_RD(bp, size) >
10320             (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
10321                 bp->link_params.lfa_base =
10322                 REG_RD(bp, bp->common.shmem2_base +
10323                        (u32)offsetof(struct shmem2_region,
10324                                      lfa_host_addr[BP_PORT(bp)]));
10325         else
10326                 bp->link_params.lfa_base = 0;
10327         BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
10328                        bp->common.shmem_base, bp->common.shmem2_base);
10329
10330         if (!bp->common.shmem_base) {
10331                 BNX2X_DEV_INFO("MCP not active\n");
10332                 bp->flags |= NO_MCP_FLAG;
10333                 return;
10334         }
10335
10336         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
10337         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
10338
10339         bp->link_params.hw_led_mode = ((bp->common.hw_config &
10340                                         SHARED_HW_CFG_LED_MODE_MASK) >>
10341                                        SHARED_HW_CFG_LED_MODE_SHIFT);
10342
10343         bp->link_params.feature_config_flags = 0;
10344         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
10345         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
10346                 bp->link_params.feature_config_flags |=
10347                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
10348         else
10349                 bp->link_params.feature_config_flags &=
10350                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
10351
10352         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
10353         bp->common.bc_ver = val;
10354         BNX2X_DEV_INFO("bc_ver %X\n", val);
10355         if (val < BNX2X_BC_VER) {
10356                 /* for now only warn;
10357                  * later we might need to enforce this */
10358                 BNX2X_ERR("This driver needs bc_ver %X but found %X, please upgrade BC\n",
10359                           BNX2X_BC_VER, val);
10360         }
10361         bp->link_params.feature_config_flags |=
10362                                 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
10363                                 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
10364
10365         bp->link_params.feature_config_flags |=
10366                 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
10367                 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
10368         bp->link_params.feature_config_flags |=
10369                 (val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ?
10370                 FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0;
10371         bp->link_params.feature_config_flags |=
10372                 (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
10373                 FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
10374
10375         bp->link_params.feature_config_flags |=
10376                 (val >= REQ_BC_VER_4_MT_SUPPORTED) ?
10377                 FEATURE_CONFIG_MT_SUPPORT : 0;
10378
10379         bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
10380                         BC_SUPPORTS_PFC_STATS : 0;
10381
10382         bp->flags |= (val >= REQ_BC_VER_4_FCOE_FEATURES) ?
10383                         BC_SUPPORTS_FCOE_FEATURES : 0;
10384
10385         bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ?
10386                         BC_SUPPORTS_DCBX_MSG_NON_PMF : 0;
10387
10388         bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ?
10389                         BC_SUPPORTS_RMMOD_CMD : 0;
10390
10391         boot_mode = SHMEM_RD(bp,
10392                         dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
10393                         PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
10394         switch (boot_mode) {
10395         case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE:
10396                 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE;
10397                 break;
10398         case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB:
10399                 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI;
10400                 break;
10401         case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT:
10402                 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE;
10403                 break;
10404         case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE:
10405                 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE;
10406                 break;
10407         }
10408
10409         pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_PMC, &pmc);
10410         bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
10411
10412         BNX2X_DEV_INFO("%sWoL capable\n",
10413                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
10414
10415         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
10416         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
10417         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
10418         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
10419
10420         dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
10421                  val, val2, val3, val4);
10422 }
10423
10424 #define IGU_FID(val)    GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
10425 #define IGU_VEC(val)    GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
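/* Editorial note on the CAM walk below: each IGU mapping-memory entry holds
 * a valid bit, a FID field (which says whether the owner is a PF and, via
 * IGU_FID_PF_NUM_MASK, which one) and a vector number. Vector 0 of a PF is
 * its default status block; every further valid vector of this PF counts
 * toward igu_sb_cnt.
 */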
10426
10427 static int bnx2x_get_igu_cam_info(struct bnx2x *bp)
10428 {
10429         int pfid = BP_FUNC(bp);
10430         int igu_sb_id;
10431         u32 val;
10432         u8 fid, igu_sb_cnt = 0;
10433
10434         bp->igu_base_sb = 0xff;
10435         if (CHIP_INT_MODE_IS_BC(bp)) {
10436                 int vn = BP_VN(bp);
10437                 igu_sb_cnt = bp->igu_sb_cnt;
10438                 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
10439                         FP_SB_MAX_E1x;
10440
10441                 bp->igu_dsb_id =  E1HVN_MAX * FP_SB_MAX_E1x +
10442                         (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
10443
10444                 return 0;
10445         }
10446
10447         /* IGU in normal mode - read CAM */
10448         for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
10449              igu_sb_id++) {
10450                 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
10451                 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
10452                         continue;
10453                 fid = IGU_FID(val);
10454                 if ((fid & IGU_FID_ENCODE_IS_PF)) {
10455                         if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
10456                                 continue;
10457                         if (IGU_VEC(val) == 0)
10458                                 /* default status block */
10459                                 bp->igu_dsb_id = igu_sb_id;
10460                         else {
10461                                 if (bp->igu_base_sb == 0xff)
10462                                         bp->igu_base_sb = igu_sb_id;
10463                                 igu_sb_cnt++;
10464                         }
10465                 }
10466         }
10467
10468 #ifdef CONFIG_PCI_MSI
10469         /* Due to the new PF resource allocation by MFW T7.4 and above, the
10470          * number of CAM entries may not be equal to the value
10471          * advertised in PCI.
10472          * The driver should use the minimum of the two as the actual status
10473          * block count.
10474          */
10475         bp->igu_sb_cnt = min_t(int, bp->igu_sb_cnt, igu_sb_cnt);
10476 #endif
10477
10478         if (igu_sb_cnt == 0) {
10479                 BNX2X_ERR("CAM configuration error\n");
10480                 return -EINVAL;
10481         }
10482
10483         return 0;
10484 }
10485
10486 static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
10487 {
10488         int cfg_size = 0, idx, port = BP_PORT(bp);
10489
10490         /* Aggregation of supported attributes of all external phys */
10491         bp->port.supported[0] = 0;
10492         bp->port.supported[1] = 0;
10493         switch (bp->link_params.num_phys) {
10494         case 1:
10495                 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
10496                 cfg_size = 1;
10497                 break;
10498         case 2:
10499                 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
10500                 cfg_size = 1;
10501                 break;
10502         case 3:
10503                 if (bp->link_params.multi_phy_config &
10504                     PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
10505                         bp->port.supported[1] =
10506                                 bp->link_params.phy[EXT_PHY1].supported;
10507                         bp->port.supported[0] =
10508                                 bp->link_params.phy[EXT_PHY2].supported;
10509                 } else {
10510                         bp->port.supported[0] =
10511                                 bp->link_params.phy[EXT_PHY1].supported;
10512                         bp->port.supported[1] =
10513                                 bp->link_params.phy[EXT_PHY2].supported;
10514                 }
10515                 cfg_size = 2;
10516                 break;
10517         }
10518
10519         if (!(bp->port.supported[0] || bp->port.supported[1])) {
10520                 BNX2X_ERR("NVRAM config error. BAD phy config. PHY1 config 0x%x, PHY2 config 0x%x\n",
10521                            SHMEM_RD(bp,
10522                            dev_info.port_hw_config[port].external_phy_config),
10523                            SHMEM_RD(bp,
10524                            dev_info.port_hw_config[port].external_phy_config2));
10525                 return;
10526         }
10527
10528         if (CHIP_IS_E3(bp))
10529                 bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR);
10530         else {
10531                 switch (switch_cfg) {
10532                 case SWITCH_CFG_1G:
10533                         bp->port.phy_addr = REG_RD(
10534                                 bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
10535                         break;
10536                 case SWITCH_CFG_10G:
10537                         bp->port.phy_addr = REG_RD(
10538                                 bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
10539                         break;
10540                 default:
10541                         BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
10542                                   bp->port.link_config[0]);
10543                         return;
10544                 }
10545         }
10546         BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
10547         /* mask what we support according to speed_cap_mask per configuration */
10548         for (idx = 0; idx < cfg_size; idx++) {
10549                 if (!(bp->link_params.speed_cap_mask[idx] &
10550                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
10551                         bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
10552
10553                 if (!(bp->link_params.speed_cap_mask[idx] &
10554                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
10555                         bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
10556
10557                 if (!(bp->link_params.speed_cap_mask[idx] &
10558                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
10559                         bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
10560
10561                 if (!(bp->link_params.speed_cap_mask[idx] &
10562                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
10563                         bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
10564
10565                 if (!(bp->link_params.speed_cap_mask[idx] &
10566                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
10567                         bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
10568                                                      SUPPORTED_1000baseT_Full);
10569
10570                 if (!(bp->link_params.speed_cap_mask[idx] &
10571                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
10572                         bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
10573
10574                 if (!(bp->link_params.speed_cap_mask[idx] &
10575                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
10576                         bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
10577
10578                 if (!(bp->link_params.speed_cap_mask[idx] &
10579                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_20G))
10580                         bp->port.supported[idx] &= ~SUPPORTED_20000baseKR2_Full;
10581         }
10582
10583         BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
10584                        bp->port.supported[1]);
10585 }
10586
10587 static void bnx2x_link_settings_requested(struct bnx2x *bp)
10588 {
10589         u32 link_config, idx, cfg_size = 0;
10590         bp->port.advertising[0] = 0;
10591         bp->port.advertising[1] = 0;
10592         switch (bp->link_params.num_phys) {
10593         case 1:
10594         case 2:
10595                 cfg_size = 1;
10596                 break;
10597         case 3:
10598                 cfg_size = 2;
10599                 break;
10600         }
10601         for (idx = 0; idx < cfg_size; idx++) {
10602                 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
10603                 link_config = bp->port.link_config[idx];
10604                 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
10605                 case PORT_FEATURE_LINK_SPEED_AUTO:
10606                         if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
10607                                 bp->link_params.req_line_speed[idx] =
10608                                         SPEED_AUTO_NEG;
10609                                 bp->port.advertising[idx] |=
10610                                         bp->port.supported[idx];
10611                                 if (bp->link_params.phy[EXT_PHY1].type ==
10612                                     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
10613                                         bp->port.advertising[idx] |=
10614                                         (SUPPORTED_100baseT_Half |
10615                                          SUPPORTED_100baseT_Full);
10616                         } else {
10617                                 /* force 10G, no AN */
10618                                 bp->link_params.req_line_speed[idx] =
10619                                         SPEED_10000;
10620                                 bp->port.advertising[idx] |=
10621                                         (ADVERTISED_10000baseT_Full |
10622                                          ADVERTISED_FIBRE);
10623                                 continue;
10624                         }
10625                         break;
10626
10627                 case PORT_FEATURE_LINK_SPEED_10M_FULL:
10628                         if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
10629                                 bp->link_params.req_line_speed[idx] =
10630                                         SPEED_10;
10631                                 bp->port.advertising[idx] |=
10632                                         (ADVERTISED_10baseT_Full |
10633                                          ADVERTISED_TP);
10634                         } else {
10635                                 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
10636                                     link_config,
10637                                     bp->link_params.speed_cap_mask[idx]);
10638                                 return;
10639                         }
10640                         break;
10641
10642                 case PORT_FEATURE_LINK_SPEED_10M_HALF:
10643                         if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
10644                                 bp->link_params.req_line_speed[idx] =
10645                                         SPEED_10;
10646                                 bp->link_params.req_duplex[idx] =
10647                                         DUPLEX_HALF;
10648                                 bp->port.advertising[idx] |=
10649                                         (ADVERTISED_10baseT_Half |
10650                                          ADVERTISED_TP);
10651                         } else {
10652                                 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
10653                                     link_config,
10654                                     bp->link_params.speed_cap_mask[idx]);
10655                                 return;
10656                         }
10657                         break;
10658
10659                 case PORT_FEATURE_LINK_SPEED_100M_FULL:
10660                         if (bp->port.supported[idx] &
10661                             SUPPORTED_100baseT_Full) {
10662                                 bp->link_params.req_line_speed[idx] =
10663                                         SPEED_100;
10664                                 bp->port.advertising[idx] |=
10665                                         (ADVERTISED_100baseT_Full |
10666                                          ADVERTISED_TP);
10667                         } else {
10668                                 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
10669                                     link_config,
10670                                     bp->link_params.speed_cap_mask[idx]);
10671                                 return;
10672                         }
10673                         break;
10674
10675                 case PORT_FEATURE_LINK_SPEED_100M_HALF:
10676                         if (bp->port.supported[idx] &
10677                             SUPPORTED_100baseT_Half) {
10678                                 bp->link_params.req_line_speed[idx] =
10679                                                                 SPEED_100;
10680                                 bp->link_params.req_duplex[idx] =
10681                                                                 DUPLEX_HALF;
10682                                 bp->port.advertising[idx] |=
10683                                         (ADVERTISED_100baseT_Half |
10684                                          ADVERTISED_TP);
10685                         } else {
10686                                 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
10687                                     link_config,
10688                                     bp->link_params.speed_cap_mask[idx]);
10689                                 return;
10690                         }
10691                         break;
10692
10693                 case PORT_FEATURE_LINK_SPEED_1G:
10694                         if (bp->port.supported[idx] &
10695                             SUPPORTED_1000baseT_Full) {
10696                                 bp->link_params.req_line_speed[idx] =
10697                                         SPEED_1000;
10698                                 bp->port.advertising[idx] |=
10699                                         (ADVERTISED_1000baseT_Full |
10700                                          ADVERTISED_TP);
10701                         } else {
10702                                 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
10703                                     link_config,
10704                                     bp->link_params.speed_cap_mask[idx]);
10705                                 return;
10706                         }
10707                         break;
10708
10709                 case PORT_FEATURE_LINK_SPEED_2_5G:
10710                         if (bp->port.supported[idx] &
10711                             SUPPORTED_2500baseX_Full) {
10712                                 bp->link_params.req_line_speed[idx] =
10713                                         SPEED_2500;
10714                                 bp->port.advertising[idx] |=
10715                                         (ADVERTISED_2500baseX_Full |
10716                                                 ADVERTISED_TP);
10717                         } else {
10718                                 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
10719                                     link_config,
10720                                     bp->link_params.speed_cap_mask[idx]);
10721                                 return;
10722                         }
10723                         break;
10724
10725                 case PORT_FEATURE_LINK_SPEED_10G_CX4:
10726                         if (bp->port.supported[idx] &
10727                             SUPPORTED_10000baseT_Full) {
10728                                 bp->link_params.req_line_speed[idx] =
10729                                         SPEED_10000;
10730                                 bp->port.advertising[idx] |=
10731                                         (ADVERTISED_10000baseT_Full |
10732                                                 ADVERTISED_FIBRE);
10733                         } else {
10734                                 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
10735                                     link_config,
10736                                     bp->link_params.speed_cap_mask[idx]);
10737                                 return;
10738                         }
10739                         break;
10740                 case PORT_FEATURE_LINK_SPEED_20G:
10741                         bp->link_params.req_line_speed[idx] = SPEED_20000;
10742
10743                         break;
10744                 default:
10745                         BNX2X_ERR("NVRAM config error. BAD link speed link_config 0x%x\n",
10746                                   link_config);
10747                         bp->link_params.req_line_speed[idx] =
10748                                 SPEED_AUTO_NEG;
10749                         bp->port.advertising[idx] =
10750                                 bp->port.supported[idx];
10751                         break;
10752                 }
10753
10754                 bp->link_params.req_flow_ctrl[idx] = (link_config &
10755                                          PORT_FEATURE_FLOW_CONTROL_MASK);
10756                 if (bp->link_params.req_flow_ctrl[idx] ==
10757                     BNX2X_FLOW_CTRL_AUTO) {
10758                         if (!(bp->port.supported[idx] & SUPPORTED_Autoneg))
10759                                 bp->link_params.req_flow_ctrl[idx] =
10760                                                         BNX2X_FLOW_CTRL_NONE;
10761                         else
10762                                 bnx2x_set_requested_fc(bp);
10763                 }
10764
10765                 BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d req_flow_ctrl 0x%x advertising 0x%x\n",
10766                                bp->link_params.req_line_speed[idx],
10767                                bp->link_params.req_duplex[idx],
10768                                bp->link_params.req_flow_ctrl[idx],
10769                                bp->port.advertising[idx]);
10770         }
10771 }
10772
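      /* Compose a 6-byte MAC address from two shmem words: mac_hi supplies
       * bytes 0-1 and mac_lo bytes 2-5, both stored big-endian. E.g.
       * mac_hi = 0x0010, mac_lo = 0x18421aa0 yields 00:10:18:42:1a:a0.
       */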
10773 static void bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
10774 {
10775         __be16 mac_hi_be = cpu_to_be16(mac_hi);
10776         __be32 mac_lo_be = cpu_to_be32(mac_lo);
10777         memcpy(mac_buf, &mac_hi_be, sizeof(mac_hi_be));
10778         memcpy(mac_buf + sizeof(mac_hi_be), &mac_lo_be, sizeof(mac_lo_be));
10779 }
10780
10781 static void bnx2x_get_port_hwinfo(struct bnx2x *bp)
10782 {
10783         int port = BP_PORT(bp);
10784         u32 config;
10785         u32 ext_phy_type, ext_phy_config, eee_mode;
10786
10787         bp->link_params.bp = bp;
10788         bp->link_params.port = port;
10789
10790         bp->link_params.lane_config =
10791                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
10792
10793         bp->link_params.speed_cap_mask[0] =
10794                 SHMEM_RD(bp,
10795                          dev_info.port_hw_config[port].speed_capability_mask) &
10796                 PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
10797         bp->link_params.speed_cap_mask[1] =
10798                 SHMEM_RD(bp,
10799                          dev_info.port_hw_config[port].speed_capability_mask2) &
10800                 PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
10801         bp->port.link_config[0] =
10802                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
10803
10804         bp->port.link_config[1] =
10805                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
10806
10807         bp->link_params.multi_phy_config =
10808                 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
10809         /* If the device is capable of WoL, set the default state according
10810          * to the HW
10811          */
10812         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
10813         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
10814                    (config & PORT_FEATURE_WOL_ENABLED));
10815
10816         if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
10817             PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE && !IS_MF(bp))
10818                 bp->flags |= NO_ISCSI_FLAG;
10819         if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
10820             PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI && !(IS_MF(bp)))
10821                 bp->flags |= NO_FCOE_FLAG;
10822
10823         BNX2X_DEV_INFO("lane_config 0x%08x  speed_cap_mask0 0x%08x  link_config0 0x%08x\n",
10824                        bp->link_params.lane_config,
10825                        bp->link_params.speed_cap_mask[0],
10826                        bp->port.link_config[0]);
10827
10828         bp->link_params.switch_cfg = (bp->port.link_config[0] &
10829                                       PORT_FEATURE_CONNECTED_SWITCH_MASK);
10830         bnx2x_phy_probe(&bp->link_params);
10831         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
10832
10833         bnx2x_link_settings_requested(bp);
10834
10835         /*
10836          * If connected directly, work with the internal PHY, otherwise, work
10837          * with the external PHY
10838          */
10839         ext_phy_config =
10840                 SHMEM_RD(bp,
10841                          dev_info.port_hw_config[port].external_phy_config);
10842         ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
10843         if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
10844                 bp->mdio.prtad = bp->port.phy_addr;
10845
10846         else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
10847                  (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
10848                 bp->mdio.prtad =
10849                         XGXS_EXT_PHY_ADDR(ext_phy_config);
10850
10851         /* Configure link feature according to nvram value */
10852         eee_mode = (((SHMEM_RD(bp, dev_info.
10853                       port_feature_config[port].eee_power_mode)) &
10854                      PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
10855                     PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
10856         if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
10857                 bp->link_params.eee_mode = EEE_MODE_ADV_LPI |
10858                                            EEE_MODE_ENABLE_LPI |
10859                                            EEE_MODE_OUTPUT_TIME;
10860         } else {
10861                 bp->link_params.eee_mode = 0;
10862         }
10863 }
10864
10865 void bnx2x_get_iscsi_info(struct bnx2x *bp)
10866 {
10867         u32 no_flags = NO_ISCSI_FLAG;
10868         int port = BP_PORT(bp);
10869         u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
10870                                 drv_lic_key[port].max_iscsi_conn);
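      	/* License values in shmem are stored XORed with
      	 * FW_ENCODE_32BIT_PATTERN; the XOR above recovers the plain value.
      	 */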
10871
10872         if (!CNIC_SUPPORT(bp)) {
10873                 bp->flags |= no_flags;
10874                 return;
10875         }
10876
10877         /* Get the maximum number of allowed iSCSI connections */
10878         bp->cnic_eth_dev.max_iscsi_conn =
10879                 (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
10880                 BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
10881
10882         BNX2X_DEV_INFO("max_iscsi_conn 0x%x\n",
10883                        bp->cnic_eth_dev.max_iscsi_conn);
10884
10885         /*
10886          * If the maximum allowed number of connections is zero,
10887          * disable the feature.
10888          */
10889         if (!bp->cnic_eth_dev.max_iscsi_conn)
10890                 bp->flags |= no_flags;
10891 }
10892
10893 static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
10894 {
10895         /* Port info */
10896         bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
10897                 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_upper);
10898         bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
10899                 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_lower);
10900
10901         /* Node info */
10902         bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
10903                 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_upper);
10904         bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
10905                 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower);
10906 }
10907
10908 static int bnx2x_shared_fcoe_funcs(struct bnx2x *bp)
10909 {
10910         u8 count = 0;
10911
10912         if (IS_MF(bp)) {
10913                 u8 fid;
10914
10915                 /* iterate over absolute function ids for this path: */
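      	        /* Absolute function ids interleave between the two paths:
      	         * starting at BP_PATH() and stepping by 2 visits fids
      	         * 0, 2, 4, ... on path 0 and 1, 3, 5, ... on path 1.
      	         */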
10916                 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX * 2; fid += 2) {
10917                         if (IS_MF_SD(bp)) {
10918                                 u32 cfg = MF_CFG_RD(bp,
10919                                                     func_mf_config[fid].config);
10920
10921                                 if (!(cfg & FUNC_MF_CFG_FUNC_HIDE) &&
10922                                     ((cfg & FUNC_MF_CFG_PROTOCOL_MASK) ==
10923                                             FUNC_MF_CFG_PROTOCOL_FCOE))
10924                                         count++;
10925                         } else {
10926                                 u32 cfg = MF_CFG_RD(bp,
10927                                                     func_ext_config[fid].
10928                                                                       func_cfg);
10929
10930                                 if ((cfg & MACP_FUNC_CFG_FLAGS_ENABLED) &&
10931                                     (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD))
10932                                         count++;
10933                         }
10934                 }
10935         } else { /* SF */
10936                 int port, port_cnt = CHIP_MODE_IS_4_PORT(bp) ? 2 : 1;
10937
10938                 for (port = 0; port < port_cnt; port++) {
10939                         u32 lic = SHMEM_RD(bp,
10940                                            drv_lic_key[port].max_fcoe_conn) ^
10941                                   FW_ENCODE_32BIT_PATTERN;
10942                         if (lic)
10943                                 count++;
10944                 }
10945         }
10946
10947         return count;
10948 }
10949
10950 static void bnx2x_get_fcoe_info(struct bnx2x *bp)
10951 {
10952         int port = BP_PORT(bp);
10953         int func = BP_ABS_FUNC(bp);
10954         u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
10955                                 drv_lic_key[port].max_fcoe_conn);
10956         u8 num_fcoe_func = bnx2x_shared_fcoe_funcs(bp);
10957
10958         if (!CNIC_SUPPORT(bp)) {
10959                 bp->flags |= NO_FCOE_FLAG;
10960                 return;
10961         }
10962
10963         /* Get the maximum number of allowed FCoE connections */
10964         bp->cnic_eth_dev.max_fcoe_conn =
10965                 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
10966                 BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
10967
10968         /* Calculate the number of maximum allowed FCoE tasks */
10969         bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE;
10970
10971         /* check if FCoE resources must be shared between different functions */
10972         if (num_fcoe_func)
10973                 bp->cnic_eth_dev.max_fcoe_exchanges /= num_fcoe_func;
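      	/* e.g. with two FCoE-capable functions on the engine, each one
      	 * is allotted half of MAX_NUM_FCOE_TASKS_PER_ENGINE exchanges.
      	 */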
10974
10975         /* Read the WWN: */
10976         if (!IS_MF(bp)) {
10977                 /* Port info */
10978                 bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
10979                         SHMEM_RD(bp,
10980                                  dev_info.port_hw_config[port].
10981                                  fcoe_wwn_port_name_upper);
10982                 bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
10983                         SHMEM_RD(bp,
10984                                  dev_info.port_hw_config[port].
10985                                  fcoe_wwn_port_name_lower);
10986
10987                 /* Node info */
10988                 bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
10989                         SHMEM_RD(bp,
10990                                  dev_info.port_hw_config[port].
10991                                  fcoe_wwn_node_name_upper);
10992                 bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
10993                         SHMEM_RD(bp,
10994                                  dev_info.port_hw_config[port].
10995                                  fcoe_wwn_node_name_lower);
10996         } else if (!IS_MF_SD(bp)) {
10997                 /*
10998                  * Read the WWN info only if the FCoE feature is enabled for
10999                  * this function.
11000                  */
11001                 if (BNX2X_MF_EXT_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp))
11002                         bnx2x_get_ext_wwn_info(bp, func);
11003
11004         } else if (IS_MF_FCOE_SD(bp) && !CHIP_IS_E1x(bp)) {
11005                 bnx2x_get_ext_wwn_info(bp, func);
11006         }
11007
11008         BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn);
11009
11010         /*
11011          * If the maximum allowed number of connections is zero,
11012          * disable the feature.
11013          */
11014         if (!bp->cnic_eth_dev.max_fcoe_conn)
11015                 bp->flags |= NO_FCOE_FLAG;
11016 }
11017
11018 static void bnx2x_get_cnic_info(struct bnx2x *bp)
11019 {
11020         /*
11021          * iSCSI may be dynamically disabled, but reading the info
11022          * here lets the driver decrease its memory usage if the
11023          * feature is disabled for good.
11024          */
11025         bnx2x_get_iscsi_info(bp);
11026         bnx2x_get_fcoe_info(bp);
11027 }
11028
11029 static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
11030 {
11031         u32 val, val2;
11032         int func = BP_ABS_FUNC(bp);
11033         int port = BP_PORT(bp);
11034         u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
11035         u8 *fip_mac = bp->fip_mac;
11036
11037         if (IS_MF(bp)) {
11038                 /* iSCSI and FCoE NPAR MACs: if either the iSCSI or the
11039                  * FCoE MAC is missing, the corresponding feature must be
11040                  * disabled. In non-SD mode the feature configuration
11041                  * comes from struct func_ext_config.
11042                  */
11043                 if (!IS_MF_SD(bp) && !CHIP_IS_E1x(bp)) {
11044                         u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
11045                         if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
11046                                 val2 = MF_CFG_RD(bp, func_ext_config[func].
11047                                                  iscsi_mac_addr_upper);
11048                                 val = MF_CFG_RD(bp, func_ext_config[func].
11049                                                 iscsi_mac_addr_lower);
11050                                 bnx2x_set_mac_buf(iscsi_mac, val, val2);
11051                                 BNX2X_DEV_INFO
11052                                         ("Read iSCSI MAC: %pM\n", iscsi_mac);
11053                         } else {
11054                                 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
11055                         }
11056
11057                         if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
11058                                 val2 = MF_CFG_RD(bp, func_ext_config[func].
11059                                                  fcoe_mac_addr_upper);
11060                                 val = MF_CFG_RD(bp, func_ext_config[func].
11061                                                 fcoe_mac_addr_lower);
11062                                 bnx2x_set_mac_buf(fip_mac, val, val2);
11063                                 BNX2X_DEV_INFO
11064                                         ("Read FCoE L2 MAC: %pM\n", fip_mac);
11065                         } else {
11066                                 bp->flags |= NO_FCOE_FLAG;
11067                         }
11068
11069                         bp->mf_ext_config = cfg;
11070
11071                 } else { /* SD MODE */
11072                         if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
11073                                 /* use primary mac as iscsi mac */
11074                                 memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN);
11075
11076                                 BNX2X_DEV_INFO("SD ISCSI MODE\n");
11077                                 BNX2X_DEV_INFO
11078                                         ("Read iSCSI MAC: %pM\n", iscsi_mac);
11079                         } else if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
11080                                 /* use primary mac as fip mac */
11081                                 memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
11082                                 BNX2X_DEV_INFO("SD FCoE MODE\n");
11083                                 BNX2X_DEV_INFO
11084                                         ("Read FIP MAC: %pM\n", fip_mac);
11085                         }
11086                 }
11087
11088                 /* If this is a storage-only interface, use SAN mac as
11089                  * primary MAC. Notice that for SD this is already the case,
11090                  * as the SAN mac was copied from the primary MAC.
11091                  */
11092                 if (IS_MF_FCOE_AFEX(bp))
11093                         memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
11094         } else {
11095                 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
11096                                 iscsi_mac_upper);
11097                 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
11098                                iscsi_mac_lower);
11099                 bnx2x_set_mac_buf(iscsi_mac, val, val2);
11100
11101                 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
11102                                 fcoe_fip_mac_upper);
11103                 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
11104                                fcoe_fip_mac_lower);
11105                 bnx2x_set_mac_buf(fip_mac, val, val2);
11106         }
11107
11108         /* Disable iSCSI and iSCSI OOO if the MAC configuration is invalid. */
11109         if (!is_valid_ether_addr(iscsi_mac)) {
11110                 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
11111                 memset(iscsi_mac, 0, ETH_ALEN);
11112         }
11113
11114         /* Disable FCoE if MAC configuration is invalid. */
11115         if (!is_valid_ether_addr(fip_mac)) {
11116                 bp->flags |= NO_FCOE_FLAG;
11117                 memset(bp->fip_mac, 0, ETH_ALEN);
11118         }
11119 }
11120
11121 static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
11122 {
11123         u32 val, val2;
11124         int func = BP_ABS_FUNC(bp);
11125         int port = BP_PORT(bp);
11126
11127         /* Zero primary MAC configuration */
11128         memset(bp->dev->dev_addr, 0, ETH_ALEN);
11129
11130         if (BP_NOMCP(bp)) {
11131                 BNX2X_ERROR("warning: random MAC workaround active\n");
11132                 eth_hw_addr_random(bp->dev);
11133         } else if (IS_MF(bp)) {
11134                 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
11135                 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
11136                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
11137                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
11138                         bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
11139
11140                 if (CNIC_SUPPORT(bp))
11141                         bnx2x_get_cnic_mac_hwinfo(bp);
11142         } else {
11143                 /* in SF read MACs from port configuration */
11144                 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
11145                 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
11146                 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
11147
11148                 if (CNIC_SUPPORT(bp))
11149                         bnx2x_get_cnic_mac_hwinfo(bp);
11150         }
11151
11152         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
11153
11154         if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr))
11155                 dev_err(&bp->pdev->dev,
11156                         "bad Ethernet MAC address configuration: %pM\n"
11157                         "change it manually before bringing up the appropriate network interface\n",
11158                         bp->dev->dev_addr);
11159 }
11160
11161 static bool bnx2x_get_dropless_info(struct bnx2x *bp)
11162 {
11163         int tmp;
11164         u32 cfg;
11165
11166         if (IS_VF(bp))
11167                 return false;
11168
11169         if (IS_MF(bp) && !CHIP_IS_E1x(bp)) {
11170                 /* Take function: tmp = func */
11171                 tmp = BP_ABS_FUNC(bp);
11172                 cfg = MF_CFG_RD(bp, func_ext_config[tmp].func_cfg);
11173                 cfg = !!(cfg & MACP_FUNC_CFG_PAUSE_ON_HOST_RING);
11174         } else {
11175                 /* Take port: tmp = port */
11176                 tmp = BP_PORT(bp);
11177                 cfg = SHMEM_RD(bp,
11178                                dev_info.port_hw_config[tmp].generic_features);
11179                 cfg = !!(cfg & PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED);
11180         }
11181         return cfg;
11182 }
11183
11184 static int bnx2x_get_hwinfo(struct bnx2x *bp)
11185 {
11186         int func = BP_ABS_FUNC(bp); /* absolute function id */
11187         int vn;
11188         u32 val = 0;
11189         int rc = 0;
11190
11191         bnx2x_get_common_hwinfo(bp);
11192
11193         /*
11194          * initialize IGU parameters
11195          */
11196         if (CHIP_IS_E1x(bp)) {
11197                 bp->common.int_block = INT_BLOCK_HC;
11198
11199                 bp->igu_dsb_id = DEF_SB_IGU_ID;
11200                 bp->igu_base_sb = 0;
11201         } else {
11202                 bp->common.int_block = INT_BLOCK_IGU;
11203
11204                 /* do not allow device reset during IGU info processing */
11205                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
11206
11207                 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
11208
11209                 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
11210                         int tout = 5000;
11211
11212                         BNX2X_DEV_INFO("FORCING Normal Mode\n");
11213
11214                         val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
11215                         REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val);
11216                         REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f);
11217
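      	                /* Poll until the IGU clears the memory-reset
      	                 * request; 5000 iterations of 1-2 ms bound the
      	                 * wait to roughly 5-10 seconds.
      	                 */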
11218                         while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
11219                                 tout--;
11220                                 usleep_range(1000, 2000);
11221                         }
11222
11223                         if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
11224                                 dev_err(&bp->pdev->dev,
11225                                         "FORCING Normal Mode failed!!!\n");
11226                                 bnx2x_release_hw_lock(bp,
11227                                                       HW_LOCK_RESOURCE_RESET);
11228                                 return -EPERM;
11229                         }
11230                 }
11231
11232                 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
11233                         BNX2X_DEV_INFO("IGU Backward Compatible Mode\n");
11234                         bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
11235                 } else
11236                         BNX2X_DEV_INFO("IGU Normal Mode\n");
11237
11238                 rc = bnx2x_get_igu_cam_info(bp);
11239                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
11240                 if (rc)
11241                         return rc;
11242         }
11243
11244         /*
11245          * Set the base FW non-default (fast path) status block id; this
11246          * value is used to initialize the fw_sb_id saved on the fp/queue
11247          * structure to determine the id used by the FW.
11248          */
11249         if (CHIP_IS_E1x(bp))
11250                 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp);
11251         else
11252                 /* 57712 - we currently use one FW SB per IGU SB (Rx and Tx
11253                  * of the same queue are indicated on the same IGU SB). So
11254                  * we prefer FW and IGU SBs to be the same value.
11255                  */
11256                 bp->base_fw_ndsb = bp->igu_base_sb;
11257
11258         BNX2X_DEV_INFO("igu_dsb_id %d  igu_base_sb %d  igu_sb_cnt %d\n"
11259                        "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb,
11260                        bp->igu_sb_cnt, bp->base_fw_ndsb);
11261
11262         /*
11263          * Initialize MF configuration
11264          */
11265
11266         bp->mf_ov = 0;
11267         bp->mf_mode = 0;
11268         vn = BP_VN(bp);
11269
11270         if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
11271                 BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
11272                                bp->common.shmem2_base, SHMEM2_RD(bp, size),
11273                               (u32)offsetof(struct shmem2_region, mf_cfg_addr));
11274
11275                 if (SHMEM2_HAS(bp, mf_cfg_addr))
11276                         bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
11277                 else
11278                         bp->common.mf_cfg_base = bp->common.shmem_base +
11279                                 offsetof(struct shmem_region, func_mb) +
11280                                 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
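      	        /* Older shmem layouts lack mf_cfg_addr; there the MF
      	         * config area sits right after the E1H_FUNC_MAX function
      	         * mailboxes, which is what the fallback above computes.
      	         */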
11281                 /*
11282                  * get mf configuration:
11283                  * 1. Existence of MF configuration
11284                  * 2. MAC address must be legal (check only upper bytes)
11285          *    for Switch-Independent mode;
11286                  *    OVLAN must be legal for Switch-Dependent mode
11287                  * 3. SF_MODE configures specific MF mode
11288                  */
11289                 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
11290                         /* get mf configuration */
11291                         val = SHMEM_RD(bp,
11292                                        dev_info.shared_feature_config.config);
11293                         val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
11294
11295                         switch (val) {
11296                         case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
11297                                 val = MF_CFG_RD(bp, func_mf_config[func].
11298                                                 mac_upper);
11299                                 /* check for legal MAC (upper bytes) */
11300                                 if (val != 0xffff) {
11301                                         bp->mf_mode = MULTI_FUNCTION_SI;
11302                                         bp->mf_config[vn] = MF_CFG_RD(bp,
11303                                                    func_mf_config[func].config);
11304                                 } else
11305                                         BNX2X_DEV_INFO("illegal MAC address for SI\n");
11306                                 break;
11307                         case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
11308                                 if ((!CHIP_IS_E1x(bp)) &&
11309                                     (MF_CFG_RD(bp, func_mf_config[func].
11310                                                mac_upper) != 0xffff) &&
11311                                     (SHMEM2_HAS(bp,
11312                                                 afex_driver_support))) {
11313                                         bp->mf_mode = MULTI_FUNCTION_AFEX;
11314                                         bp->mf_config[vn] = MF_CFG_RD(bp,
11315                                                 func_mf_config[func].config);
11316                                 } else {
11317                                         BNX2X_DEV_INFO("cannot configure afex mode\n");
11318                                 }
11319                                 break;
11320                         case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
11321                                 /* get OV configuration */
11322                                 val = MF_CFG_RD(bp,
11323                                         func_mf_config[FUNC_0].e1hov_tag);
11324                                 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
11325
11326                                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
11327                                         bp->mf_mode = MULTI_FUNCTION_SD;
11328                                         bp->mf_config[vn] = MF_CFG_RD(bp,
11329                                                 func_mf_config[func].config);
11330                                 } else
11331                                         BNX2X_DEV_INFO("illegal OV for SD\n");
11332                                 break;
11333                         case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
11334                                 bp->mf_config[vn] = 0;
11335                                 break;
11336                         default:
11337                                 /* Unknown configuration: reset mf_config */
11338                                 bp->mf_config[vn] = 0;
11339                                 BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val);
11340                         }
11341                 }
11342
11343                 BNX2X_DEV_INFO("%s function mode\n",
11344                                IS_MF(bp) ? "multi" : "single");
11345
11346                 switch (bp->mf_mode) {
11347                 case MULTI_FUNCTION_SD:
11348                         val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
11349                               FUNC_MF_CFG_E1HOV_TAG_MASK;
11350                         if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
11351                                 bp->mf_ov = val;
11352                                 bp->path_has_ovlan = true;
11353
11354                                 BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n",
11355                                                func, bp->mf_ov, bp->mf_ov);
11356                         } else {
11357                                 dev_err(&bp->pdev->dev,
11358                                         "No valid MF OV for func %d, aborting\n",
11359                                         func);
11360                                 return -EPERM;
11361                         }
11362                         break;
11363                 case MULTI_FUNCTION_AFEX:
11364                         BNX2X_DEV_INFO("func %d is in MF afex mode\n", func);
11365                         break;
11366                 case MULTI_FUNCTION_SI:
11367                         BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n",
11368                                        func);
11369                         break;
11370                 default:
11371                         if (vn) {
11372                                 dev_err(&bp->pdev->dev,
11373                                         "VN %d is in a single function mode, aborting\n",
11374                                         vn);
11375                                 return -EPERM;
11376                         }
11377                         break;
11378                 }
11379
11380                 /* check if the other port on the path needs ovlan:
11381                  * since the MF configuration is shared between ports,
11382                  * the only possible mixed modes are
11383                  * {SF, SI} {SF, SD} {SD, SF} {SI, SF}
11384                  */
11385                 if (CHIP_MODE_IS_4_PORT(bp) &&
11386                     !bp->path_has_ovlan &&
11387                     !IS_MF(bp) &&
11388                     bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
11389                         u8 other_port = !BP_PORT(bp);
11390                         u8 other_func = BP_PATH(bp) + 2*other_port;
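      	                /* absolute fids interleave across paths, so the
      	                 * same-path function on the other port sits at
      	                 * path + 2 * port
      	                 */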
11391                         val = MF_CFG_RD(bp,
11392                                         func_mf_config[other_func].e1hov_tag);
11393                         if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
11394                                 bp->path_has_ovlan = true;
11395                 }
11396         }
11397
11398         /* adjust igu_sb_cnt to MF for E1x */
11399         if (CHIP_IS_E1x(bp) && IS_MF(bp))
11400                 bp->igu_sb_cnt /= E1HVN_MAX;
11401
11402         /* port info */
11403         bnx2x_get_port_hwinfo(bp);
11404
11405         /* Get MAC addresses */
11406         bnx2x_get_mac_hwinfo(bp);
11407
11408         bnx2x_get_cnic_info(bp);
11409
11410         return rc;
11411 }
11412
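      /* Extract an OEM firmware version string from the PCI VPD. The read-only
       * section is located first; if it extends past BNX2X_VPD_LEN the full
       * image is re-read into a temporary buffer. The VENDOR0 keyword is only
       * consumed on boards whose MFR_ID matches PCI_VENDOR_ID_DELL.
       */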
11413 static void bnx2x_read_fwinfo(struct bnx2x *bp)
11414 {
11415         int cnt, i, block_end, rodi;
11416         char vpd_start[BNX2X_VPD_LEN+1];
11417         char str_id_reg[VENDOR_ID_LEN+1];
11418         char str_id_cap[VENDOR_ID_LEN+1];
11419         char *vpd_data;
11420         char *vpd_extended_data = NULL;
11421         u8 len;
11422
11423         cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start);
11424         memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
11425
11426         if (cnt < BNX2X_VPD_LEN)
11427                 goto out_not_found;
11428
11429         /* The VPD RO tag should be the first tag after the identifier
11430          * string, so it should be found within the first BNX2X_VPD_LEN chars
11431          */
11432         i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN,
11433                              PCI_VPD_LRDT_RO_DATA);
11434         if (i < 0)
11435                 goto out_not_found;
11436
11437         block_end = i + PCI_VPD_LRDT_TAG_SIZE +
11438                     pci_vpd_lrdt_size(&vpd_start[i]);
11439
11440         i += PCI_VPD_LRDT_TAG_SIZE;
11441
11442         if (block_end > BNX2X_VPD_LEN) {
11443                 vpd_extended_data = kmalloc(block_end, GFP_KERNEL);
11444                 if (vpd_extended_data == NULL)
11445                         goto out_not_found;
11446
11447                 /* read rest of vpd image into vpd_extended_data */
11448                 memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN);
11449                 cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN,
11450                                    block_end - BNX2X_VPD_LEN,
11451                                    vpd_extended_data + BNX2X_VPD_LEN);
11452                 if (cnt < (block_end - BNX2X_VPD_LEN))
11453                         goto out_not_found;
11454                 vpd_data = vpd_extended_data;
11455         } else
11456                 vpd_data = vpd_start;
11457
11458         /* now vpd_data holds full vpd content in both cases */
11459
11460         rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
11461                                    PCI_VPD_RO_KEYWORD_MFR_ID);
11462         if (rodi < 0)
11463                 goto out_not_found;
11464
11465         len = pci_vpd_info_field_size(&vpd_data[rodi]);
11466
11467         if (len != VENDOR_ID_LEN)
11468                 goto out_not_found;
11469
11470         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
11471
11472         /* vendor specific info */
11473         snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
11474         snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
11475         if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
11476             !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
11477
11478                 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
11479                                                 PCI_VPD_RO_KEYWORD_VENDOR0);
11480                 if (rodi >= 0) {
11481                         len = pci_vpd_info_field_size(&vpd_data[rodi]);
11482
11483                         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
11484
11485                         if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
11486                                 memcpy(bp->fw_ver, &vpd_data[rodi], len);
11487                                 bp->fw_ver[len] = ' ';
11488                         }
11489                 }
11490                 kfree(vpd_extended_data);
11491                 return;
11492         }
11493 out_not_found:
11494         kfree(vpd_extended_data);
11495         return;
11496 }
11497
11498 static void bnx2x_set_modes_bitmap(struct bnx2x *bp)
11499 {
11500         u32 flags = 0;
11501
11502         if (CHIP_REV_IS_FPGA(bp))
11503                 SET_FLAGS(flags, MODE_FPGA);
11504         else if (CHIP_REV_IS_EMUL(bp))
11505                 SET_FLAGS(flags, MODE_EMUL);
11506         else
11507                 SET_FLAGS(flags, MODE_ASIC);
11508
11509         if (CHIP_MODE_IS_4_PORT(bp))
11510                 SET_FLAGS(flags, MODE_PORT4);
11511         else
11512                 SET_FLAGS(flags, MODE_PORT2);
11513
11514         if (CHIP_IS_E2(bp))
11515                 SET_FLAGS(flags, MODE_E2);
11516         else if (CHIP_IS_E3(bp)) {
11517                 SET_FLAGS(flags, MODE_E3);
11518                 if (CHIP_REV(bp) == CHIP_REV_Ax)
11519                         SET_FLAGS(flags, MODE_E3_A0);
11520                 else /*if (CHIP_REV(bp) == CHIP_REV_Bx)*/
11521                         SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
11522         }
11523
11524         if (IS_MF(bp)) {
11525                 SET_FLAGS(flags, MODE_MF);
11526                 switch (bp->mf_mode) {
11527                 case MULTI_FUNCTION_SD:
11528                         SET_FLAGS(flags, MODE_MF_SD);
11529                         break;
11530                 case MULTI_FUNCTION_SI:
11531                         SET_FLAGS(flags, MODE_MF_SI);
11532                         break;
11533                 case MULTI_FUNCTION_AFEX:
11534                         SET_FLAGS(flags, MODE_MF_AFEX);
11535                         break;
11536                 }
11537         } else
11538                 SET_FLAGS(flags, MODE_SF);
11539
11540 #if defined(__LITTLE_ENDIAN)
11541         SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
11542 #else /*(__BIG_ENDIAN)*/
11543         SET_FLAGS(flags, MODE_BIG_ENDIAN);
11544 #endif
11545         INIT_MODE_FLAGS(bp) = flags;
11546 }
11547
11548 static int bnx2x_init_bp(struct bnx2x *bp)
11549 {
11550         int func;
11551         int rc;
11552
11553         mutex_init(&bp->port.phy_mutex);
11554         mutex_init(&bp->fw_mb_mutex);
11555         spin_lock_init(&bp->stats_lock);
11556         sema_init(&bp->stats_sema, 1);
11557
11558         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
11559         INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
11560         INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
11561         if (IS_PF(bp)) {
11562                 rc = bnx2x_get_hwinfo(bp);
11563                 if (rc)
11564                         return rc;
11565         } else {
11566                 eth_zero_addr(bp->dev->dev_addr);
11567         }
11568
11569         bnx2x_set_modes_bitmap(bp);
11570
11571         rc = bnx2x_alloc_mem_bp(bp);
11572         if (rc)
11573                 return rc;
11574
11575         bnx2x_read_fwinfo(bp);
11576
11577         func = BP_FUNC(bp);
11578
11579         /* need to reset the chip if UNDI was active */
11580         if (IS_PF(bp) && !BP_NOMCP(bp)) {
11581                 /* init fw_seq */
11582                 bp->fw_seq =
11583                         SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
11584                                                         DRV_MSG_SEQ_NUMBER_MASK;
11585                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
11586
11587                 bnx2x_prev_unload(bp);
11588         }
11589
11590         if (CHIP_REV_IS_FPGA(bp))
11591                 dev_err(&bp->pdev->dev, "FPGA detected\n");
11592
11593         if (BP_NOMCP(bp) && (func == 0))
11594                 dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n");
11595
11596         bp->disable_tpa = disable_tpa;
11597         bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp);
11598
11599         /* Set TPA flags */
11600         if (bp->disable_tpa) {
11601                 bp->flags &= ~(TPA_ENABLE_FLAG | GRO_ENABLE_FLAG);
11602                 bp->dev->features &= ~NETIF_F_LRO;
11603         } else {
11604                 bp->flags |= (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG);
11605                 bp->dev->features |= NETIF_F_LRO;
11606         }
11607
11608         if (CHIP_IS_E1(bp))
11609                 bp->dropless_fc = 0;
11610         else
11611                 bp->dropless_fc = dropless_fc | bnx2x_get_dropless_info(bp);
11612
11613         bp->mrrs = mrrs;
11614
11615         bp->tx_ring_size = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL;
11616         if (IS_VF(bp))
11617                 bp->rx_ring_size = MAX_RX_AVAIL;
11618
11619         /* make sure that the numbers are in the right granularity */
11620         bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
11621         bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
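      	/* i.e. default 50us/25us coalescing intervals rounded down to a
      	 * multiple of BNX2X_BTR (assumed to be the HW timer granularity).
      	 */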
11622
11623         bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ;
11624
11625         init_timer(&bp->timer);
11626         bp->timer.expires = jiffies + bp->current_interval;
11627         bp->timer.data = (unsigned long) bp;
11628         bp->timer.function = bnx2x_timer;
11629
11630         if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) &&
11631             SHMEM2_HAS(bp, dcbx_lldp_dcbx_stat_offset) &&
11632             SHMEM2_RD(bp, dcbx_lldp_params_offset) &&
11633             SHMEM2_RD(bp, dcbx_lldp_dcbx_stat_offset)) {
11634                 bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
11635                 bnx2x_dcbx_init_params(bp);
11636         } else {
11637                 bnx2x_dcbx_set_state(bp, false, BNX2X_DCBX_ENABLED_OFF);
11638         }
11639
11640         if (CHIP_IS_E1x(bp))
11641                 bp->cnic_base_cl_id = FP_SB_MAX_E1x;
11642         else
11643                 bp->cnic_base_cl_id = FP_SB_MAX_E2;
11644
11645         /* multiple tx priority */
11646         if (IS_VF(bp))
11647                 bp->max_cos = 1;
11648         else if (CHIP_IS_E1x(bp))
11649                 bp->max_cos = BNX2X_MULTI_TX_COS_E1X;
11650         else if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
11651                 bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0;
11652         else if (CHIP_IS_E3B0(bp))
11653                 bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
11654         else
11655                 BNX2X_ERR("unknown chip %x revision %x\n",
11656                           CHIP_NUM(bp), CHIP_REV(bp));
11657         BNX2X_DEV_INFO("set bp->max_cos to %d\n", bp->max_cos);
11658
11659         /* We need at least one default status block for slow-path events,
11660          * a second status block for the L2 queue, and a third status block for
11661          * CNIC if supported.
11662          */
11663         if (IS_VF(bp))
11664                 bp->min_msix_vec_cnt = 1;
11665         else if (CNIC_SUPPORT(bp))
11666                 bp->min_msix_vec_cnt = 3;
11667         else /* PF w/o cnic */
11668                 bp->min_msix_vec_cnt = 2;
11669         BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d\n", bp->min_msix_vec_cnt);
11670
11671         bp->dump_preset_idx = 1;
11672
11673         return rc;
11674 }
11675
11676 /****************************************************************************
11677 * General service functions
11678 ****************************************************************************/
11679
11680 /*
11681  * net_device service functions
11682  */
11683
11684 /* called with rtnl_lock */
11685 static int bnx2x_open(struct net_device *dev)
11686 {
11687         struct bnx2x *bp = netdev_priv(dev);
11688         bool global = false;
11689         int other_engine = BP_PATH(bp) ? 0 : 1;
11690         bool other_load_status, load_status;
11691         int rc;
11692
11693         bp->stats_init = true;
11694
11695         netif_carrier_off(dev);
11696
11697         bnx2x_set_power_state(bp, PCI_D0);
11698
11699         /* If parity happened during the unload, then attentions
11700          * and/or RECOVERY_IN_PROGRESS may still be set. In this case we
11701          * want the first function loaded on the current engine to
11702          * complete the recovery.
11703          * Parity recovery is only relevant for the PF driver.
11704          */
11705         if (IS_PF(bp)) {
11706                 other_load_status = bnx2x_get_load_status(bp, other_engine);
11707                 load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
11708                 if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
11709                     bnx2x_chk_parity_attn(bp, &global, true)) {
11710                         do {
11711                                 /* If there are attentions and they are in
11712                                  * global blocks, set the GLOBAL_RESET bit
11713                                  * regardless of whether it will be this
11714                                  * function that completes the recovery.
11715                                  */
11716                                 if (global)
11717                                         bnx2x_set_reset_global(bp);
11718
11719                                 /* Only the first function on the current
11720                                  * engine should try to recover in open. In
11721                                  * case of attentions in global blocks, only
11722                                  * the first function in the chip should recover.
11723                                  */
11724                                 if ((!load_status &&
11725                                      (!global || !other_load_status)) &&
11726                                       bnx2x_trylock_leader_lock(bp) &&
11727                                       !bnx2x_leader_reset(bp)) {
11728                                         netdev_info(bp->dev,
11729                                                     "Recovered in open\n");
11730                                         break;
11731                                 }
11732
11733                                 /* recovery has failed... */
11734                                 bnx2x_set_power_state(bp, PCI_D3hot);
11735                                 bp->recovery_state = BNX2X_RECOVERY_FAILED;
11736
11737                                 BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n"
11738                                           "If you still see this message after a few retries then power cycle is required.\n");
11739
11740                                 return -EAGAIN;
11741                         } while (0);
11742                 }
11743         }
11744
11745         bp->recovery_state = BNX2X_RECOVERY_DONE;
11746         rc = bnx2x_nic_load(bp, LOAD_OPEN);
11747         if (rc)
11748                 return rc;
11749         return bnx2x_open_epilog(bp);
11750 }
11751
11752 /* called with rtnl_lock */
11753 static int bnx2x_close(struct net_device *dev)
11754 {
11755         struct bnx2x *bp = netdev_priv(dev);
11756
11757         /* Unload the driver, release IRQs */
11758         bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
11759
11760         return 0;
11761 }
11762
11763 static int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
11764                                       struct bnx2x_mcast_ramrod_params *p)
11765 {
11766         int mc_count = netdev_mc_count(bp->dev);
11767         struct bnx2x_mcast_list_elem *mc_mac =
11768                 kcalloc(mc_count, sizeof(*mc_mac), GFP_ATOMIC);
11769         struct netdev_hw_addr *ha;
11770
11771         if (!mc_mac)
11772                 return -ENOMEM;
11773
11774         INIT_LIST_HEAD(&p->mcast_list);
11775
11776         netdev_for_each_mc_addr(ha, bp->dev) {
11777                 mc_mac->mac = bnx2x_mc_addr(ha);
11778                 list_add_tail(&mc_mac->link, &p->mcast_list);
11779                 mc_mac++;
11780         }
11781
11782         p->mcast_list_len = mc_count;
11783
11784         return 0;
11785 }
11786
11787 static void bnx2x_free_mcast_macs_list(
11788         struct bnx2x_mcast_ramrod_params *p)
11789 {
11790         struct bnx2x_mcast_list_elem *mc_mac =
11791                 list_first_entry(&p->mcast_list, struct bnx2x_mcast_list_elem,
11792                                  link);
11793
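      	/* The elements were allocated as a single array in
      	 * bnx2x_init_mcast_macs_list(), so freeing the first entry
      	 * releases the whole list.
      	 */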
11794         WARN_ON(!mc_mac);
11795         kfree(mc_mac);
11796 }
11797
11798 /**
11799  * bnx2x_set_uc_list - configure a new unicast MACs list.
11800  *
11801  * @bp: driver handle
11802  *
11803  * We will use zero (0) as a MAC type for these MACs.
11804  */
11805 static int bnx2x_set_uc_list(struct bnx2x *bp)
11806 {
11807         int rc;
11808         struct net_device *dev = bp->dev;
11809         struct netdev_hw_addr *ha;
11810         struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
11811         unsigned long ramrod_flags = 0;
11812
11813         /* First schedule a cleanup of the old configuration */
11814         rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false);
11815         if (rc < 0) {
11816                 BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc);
11817                 return rc;
11818         }
11819
11820         netdev_for_each_uc_addr(ha, dev) {
11821                 rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true,
11822                                        BNX2X_UC_LIST_MAC, &ramrod_flags);
11823                 if (rc == -EEXIST) {
11824                         DP(BNX2X_MSG_SP,
11825                            "Failed to schedule ADD operations: %d\n", rc);
11826                         /* do not treat adding the same MAC as an error */
11827                         rc = 0;
11828
11829                 } else if (rc < 0) {
11830
11831                         BNX2X_ERR("Failed to schedule ADD operations: %d\n",
11832                                   rc);
11833                         return rc;
11834                 }
11835         }
11836
11837         /* Execute the pending commands */
11838         __set_bit(RAMROD_CONT, &ramrod_flags);
11839         return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */,
11840                                  BNX2X_UC_LIST_MAC, &ramrod_flags);
11841 }
11842
11843 static int bnx2x_set_mc_list(struct bnx2x *bp)
11844 {
11845         struct net_device *dev = bp->dev;
11846         struct bnx2x_mcast_ramrod_params rparam = {NULL};
11847         int rc = 0;
11848
11849         rparam.mcast_obj = &bp->mcast_obj;
11850
11851         /* first, clear all configured multicast MACs */
11852         rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
11853         if (rc < 0) {
11854                 BNX2X_ERR("Failed to clear multicast configuration: %d\n", rc);
11855                 return rc;
11856         }
11857
11858         /* then, configure the new MAC list */
11859         if (netdev_mc_count(dev)) {
11860                 rc = bnx2x_init_mcast_macs_list(bp, &rparam);
11861                 if (rc) {
11862                         BNX2X_ERR("Failed to create multicast MACs list: %d\n",
11863                                   rc);
11864                         return rc;
11865                 }
11866
11867                 /* Now add the new MACs */
11868                 rc = bnx2x_config_mcast(bp, &rparam,
11869                                         BNX2X_MCAST_CMD_ADD);
11870                 if (rc < 0)
11871                         BNX2X_ERR("Failed to set a new multicast configuration: %d\n",
11872                                   rc);
11873
11874                 bnx2x_free_mcast_macs_list(&rparam);
11875         }
11876
11877         return rc;
11878 }
11879
11880 /* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
11881 void bnx2x_set_rx_mode(struct net_device *dev)
11882 {
11883         struct bnx2x *bp = netdev_priv(dev);
11884
11885         if (bp->state != BNX2X_STATE_OPEN) {
11886                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11887                 return;
11888         }
11889
11890         /* Schedule an SP task to handle the rest of the change */
11891         DP(NETIF_MSG_IFUP, "Scheduling an Rx mode change\n");
11892         smp_mb__before_clear_bit();
11893         set_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state);
11894         smp_mb__after_clear_bit();
11895         schedule_delayed_work(&bp->sp_rtnl_task, 0);
11896 }
11897
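/* Runs from the sp_rtnl task scheduled by bnx2x_set_rx_mode() above; takes
 * and releases the netdev address lock itself, and may sleep in the paths
 * that talk to the PF/FW.
 */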
11898 void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
11899 {
11900         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11901
11902         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);
11903
11904         netif_addr_lock_bh(bp->dev);
11905
11906         if (bp->dev->flags & IFF_PROMISC) {
11907                 rx_mode = BNX2X_RX_MODE_PROMISC;
11908         } else if ((bp->dev->flags & IFF_ALLMULTI) ||
11909                    ((netdev_mc_count(bp->dev) > BNX2X_MAX_MULTICAST) &&
11910                     CHIP_IS_E1(bp))) {
11911                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11912         } else {
11913                 if (IS_PF(bp)) {
11914                         /* some multicasts */
11915                         if (bnx2x_set_mc_list(bp) < 0)
11916                                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11917
11918                         /* release bh lock, as bnx2x_set_uc_list might sleep */
11919                         netif_addr_unlock_bh(bp->dev);
11920                         if (bnx2x_set_uc_list(bp) < 0)
11921                                 rx_mode = BNX2X_RX_MODE_PROMISC;
11922                         netif_addr_lock_bh(bp->dev);
11923                 } else {
11924                         /* Configuring mcast for a VF involves sleeping
11925                          * (while we wait for the PF's response).
11926                          */
11927                         smp_mb__before_clear_bit();
11928                         set_bit(BNX2X_SP_RTNL_VFPF_MCAST,
11929                                 &bp->sp_rtnl_state);
11930                         smp_mb__after_clear_bit();
11931                         schedule_delayed_work(&bp->sp_rtnl_task, 0);
11932                 }
11933         }
11934
11935         bp->rx_mode = rx_mode;
11936         /* handle ISCSI SD mode */
11937         if (IS_MF_ISCSI_SD(bp))
11938                 bp->rx_mode = BNX2X_RX_MODE_NONE;
11939
11940         /* Schedule the rx_mode command */
11941         if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
11942                 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
11943                 netif_addr_unlock_bh(bp->dev);
11944                 return;
11945         }
11946
11947         if (IS_PF(bp)) {
11948                 bnx2x_set_storm_rx_mode(bp);
11949                 netif_addr_unlock_bh(bp->dev);
11950         } else {
11951                 /* VF will need to request the PF to make this change, and so
11952                  * the VF needs to release the bottom-half lock prior to the
11953                  * request (as it will likely require sleep on the VF side)
11954                  */
11955                 netif_addr_unlock_bh(bp->dev);
11956                 bnx2x_vfpf_storm_rx_mode(bp);
11957         }
11958 }
11959
11960 /* called with rtnl_lock */
11961 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11962                            int devad, u16 addr)
11963 {
11964         struct bnx2x *bp = netdev_priv(netdev);
11965         u16 value;
11966         int rc;
11967
11968         DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11969            prtad, devad, addr);
11970
11971         /* The HW expects different devad if CL22 is used */
11972         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11973
11974         bnx2x_acquire_phy_lock(bp);
11975         rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
11976         bnx2x_release_phy_lock(bp);
11977         DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
11978
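        /* On success, fold the (non-negative) 16-bit register value into the
         * return code; a negative return indicates an error.
         */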
11979         if (!rc)
11980                 rc = value;
11981         return rc;
11982 }
11983
11984 /* called with rtnl_lock */
11985 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11986                             u16 addr, u16 value)
11987 {
11988         struct bnx2x *bp = netdev_priv(netdev);
11989         int rc;
11990
11991         DP(NETIF_MSG_LINK,
11992            "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x, value 0x%x\n",
11993            prtad, devad, addr, value);
11994
11995         /* The HW expects different devad if CL22 is used */
11996         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11997
11998         bnx2x_acquire_phy_lock(bp);
11999         rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
12000         bnx2x_release_phy_lock(bp);
12001         return rc;
12002 }
12003
12004 /* called with rtnl_lock */
12005 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12006 {
12007         struct bnx2x *bp = netdev_priv(dev);
12008         struct mii_ioctl_data *mdio = if_mii(ifr);
12009
12010         DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
12011            mdio->phy_id, mdio->reg_num, mdio->val_in);
12012
12013         if (!netif_running(dev))
12014                 return -EAGAIN;
12015
12016         return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
12017 }
12018
12019 #ifdef CONFIG_NET_POLL_CONTROLLER
12020 static void poll_bnx2x(struct net_device *dev)
12021 {
12022         struct bnx2x *bp = netdev_priv(dev);
12023         int i;
12024
12025         for_each_eth_queue(bp, i) {
12026                 struct bnx2x_fastpath *fp = &bp->fp[i];
12027                 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
12028         }
12029 }
12030 #endif
12031
12032 static int bnx2x_validate_addr(struct net_device *dev)
12033 {
12034         struct bnx2x *bp = netdev_priv(dev);
12035
12036         /* query the bulletin board for mac address configured by the PF */
12037         if (IS_VF(bp))
12038                 bnx2x_sample_bulletin(bp);
12039
12040         if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr)) {
12041                 BNX2X_ERR("Non-valid Ethernet address\n");
12042                 return -EADDRNOTAVAIL;
12043         }
12044         return 0;
12045 }
12046
12047 static const struct net_device_ops bnx2x_netdev_ops = {
12048         .ndo_open               = bnx2x_open,
12049         .ndo_stop               = bnx2x_close,
12050         .ndo_start_xmit         = bnx2x_start_xmit,
12051         .ndo_select_queue       = bnx2x_select_queue,
12052         .ndo_set_rx_mode        = bnx2x_set_rx_mode,
12053         .ndo_set_mac_address    = bnx2x_change_mac_addr,
12054         .ndo_validate_addr      = bnx2x_validate_addr,
12055         .ndo_do_ioctl           = bnx2x_ioctl,
12056         .ndo_change_mtu         = bnx2x_change_mtu,
12057         .ndo_fix_features       = bnx2x_fix_features,
12058         .ndo_set_features       = bnx2x_set_features,
12059         .ndo_tx_timeout         = bnx2x_tx_timeout,
12060 #ifdef CONFIG_NET_POLL_CONTROLLER
12061         .ndo_poll_controller    = poll_bnx2x,
12062 #endif
12063         .ndo_setup_tc           = bnx2x_setup_tc,
12064 #ifdef CONFIG_BNX2X_SRIOV
12065         .ndo_set_vf_mac         = bnx2x_set_vf_mac,
12066         .ndo_set_vf_vlan        = bnx2x_set_vf_vlan,
12067         .ndo_get_vf_config      = bnx2x_get_vf_config,
12068 #endif
12069 #ifdef NETDEV_FCOE_WWNN
12070         .ndo_fcoe_get_wwn       = bnx2x_fcoe_get_wwn,
12071 #endif
12072
12073 #ifdef CONFIG_NET_RX_BUSY_POLL
12074         .ndo_busy_poll          = bnx2x_low_latency_recv,
12075 #endif
12076 };
12077
12078 static int bnx2x_set_coherency_mask(struct bnx2x *bp)
12079 {
12080         struct device *dev = &bp->pdev->dev;
12081
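        /* Prefer a 64-bit DMA mask and fall back to 32-bit; USING_DAC_FLAG
         * is later used to advertise NETIF_F_HIGHDMA on the netdev.
         */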
12082         if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) == 0) {
12083                 bp->flags |= USING_DAC_FLAG;
12084         } else if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)) != 0) {
12085                 dev_err(dev, "System does not support DMA, aborting\n");
12086                 return -EIO;
12087         }
12088
12089         return 0;
12090 }
12091
12092 static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
12093                           struct net_device *dev, unsigned long board_type)
12094 {
12095         int rc;
12096         u32 pci_cfg_dword;
12097         bool chip_is_e1x = (board_type == BCM57710 ||
12098                             board_type == BCM57711 ||
12099                             board_type == BCM57711E);
12100
12101         SET_NETDEV_DEV(dev, &pdev->dev);
12102
12103         bp->dev = dev;
12104         bp->pdev = pdev;
12105
12106         rc = pci_enable_device(pdev);
12107         if (rc) {
12108                 dev_err(&bp->pdev->dev,
12109                         "Cannot enable PCI device, aborting\n");
12110                 goto err_out;
12111         }
12112
12113         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12114                 dev_err(&bp->pdev->dev,
12115                         "Cannot find PCI device base address, aborting\n");
12116                 rc = -ENODEV;
12117                 goto err_out_disable;
12118         }
12119
12120         if (IS_PF(bp) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
12121                 dev_err(&bp->pdev->dev, "Cannot find second PCI device base address, aborting\n");
12122                 rc = -ENODEV;
12123                 goto err_out_disable;
12124         }
12125
12126         pci_read_config_dword(pdev, PCICFG_REVISION_ID_OFFSET, &pci_cfg_dword);
12127         if ((pci_cfg_dword & PCICFG_REVESION_ID_MASK) ==
12128             PCICFG_REVESION_ID_ERROR_VAL) {
12129                 pr_err("PCI device error, probably due to fan failure, aborting\n");
12130                 rc = -ENODEV;
12131                 goto err_out_disable;
12132         }
12133
12134         if (atomic_read(&pdev->enable_cnt) == 1) {
12135                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12136                 if (rc) {
12137                         dev_err(&bp->pdev->dev,
12138                                 "Cannot obtain PCI resources, aborting\n");
12139                         goto err_out_disable;
12140                 }
12141
12142                 pci_set_master(pdev);
12143                 pci_save_state(pdev);
12144         }
12145
12146         if (IS_PF(bp)) {
12147                 if (!pdev->pm_cap) {
12148                         dev_err(&bp->pdev->dev,
12149                                 "Cannot find power management capability, aborting\n");
12150                         rc = -EIO;
12151                         goto err_out_release;
12152                 }
12153         }
12154
12155         if (!pci_is_pcie(pdev)) {
12156                 dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n");
12157                 rc = -EIO;
12158                 goto err_out_release;
12159         }
12160
12161         rc = bnx2x_set_coherency_mask(bp);
12162         if (rc)
12163                 goto err_out_release;
12164
12165         dev->mem_start = pci_resource_start(pdev, 0);
12166         dev->base_addr = dev->mem_start;
12167         dev->mem_end = pci_resource_end(pdev, 0);
12168
12169         dev->irq = pdev->irq;
12170
12171         bp->regview = pci_ioremap_bar(pdev, 0);
12172         if (!bp->regview) {
12173                 dev_err(&bp->pdev->dev,
12174                         "Cannot map register space, aborting\n");
12175                 rc = -ENOMEM;
12176                 goto err_out_release;
12177         }
12178
12179         /* In E1/E1H use the PCI device function given by the kernel.
12180          * In E2/E3 read the physical function from the ME register, since
12181          * these chips support Physical Device Assignment where the kernel
12182          * BDF may be arbitrary (depending on the hypervisor).
12183          */
12184         if (chip_is_e1x) {
12185                 bp->pf_num = PCI_FUNC(pdev->devfn);
12186         } else {
12187                 /* chip is E2/3*/
12188                 pci_read_config_dword(bp->pdev,
12189                                       PCICFG_ME_REGISTER, &pci_cfg_dword);
12190                 bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >>
12191                                   ME_REG_ABS_PF_NUM_SHIFT);
12192         }
12193         BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num);
12194
12195         /* clean indirect addresses */
12196         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
12197                                PCICFG_VENDOR_ID_OFFSET);
12198         /*
12199          * Clean the following indirect addresses for all functions, since
12200          * they are not used by the driver.
12201          */
12202         if (IS_PF(bp)) {
12203                 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
12204                 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
12205                 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
12206                 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
12207
12208                 if (chip_is_e1x) {
12209                         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
12210                         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
12211                         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
12212                         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
12213                 }
12214
12215                 /* Enable internal target-read (in case we are probed after PF
12216                  * FLR). Must be done prior to any BAR read access. Only for
12217                  * 57712 and up
12218                  */
12219                 if (!chip_is_e1x)
12220                         REG_WR(bp,
12221                                PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
12222         }
12223
12224         dev->watchdog_timeo = TX_TIMEOUT;
12225
12226         dev->netdev_ops = &bnx2x_netdev_ops;
12227         bnx2x_set_ethtool_ops(bp, dev);
12228
12229         dev->priv_flags |= IFF_UNICAST_FLT;
12230
12231         dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
12232                 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
12233                 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
12234                 NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
12235         if (!CHIP_IS_E1x(bp)) {
12236                 dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
12237                 dev->hw_enc_features =
12238                         NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
12239                         NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
12240                         NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
12241         }
12242
12243         dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
12244                 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
12245
12246         dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
12247         if (bp->flags & USING_DAC_FLAG)
12248                 dev->features |= NETIF_F_HIGHDMA;
12249
12250         /* Add Loopback capability to the device */
12251         dev->hw_features |= NETIF_F_LOOPBACK;
12252
12253 #ifdef BCM_DCBNL
12254         dev->dcbnl_ops = &bnx2x_dcbnl_ops;
12255 #endif
12256
12257         /* get_port_hwinfo() will set prtad and mmds properly */
12258         bp->mdio.prtad = MDIO_PRTAD_NONE;
12259         bp->mdio.mmds = 0;
12260         bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
12261         bp->mdio.dev = dev;
12262         bp->mdio.mdio_read = bnx2x_mdio_read;
12263         bp->mdio.mdio_write = bnx2x_mdio_write;
12264
12265         return 0;
12266
12267 err_out_release:
12268         if (atomic_read(&pdev->enable_cnt) == 1)
12269                 pci_release_regions(pdev);
12270
12271 err_out_disable:
12272         pci_disable_device(pdev);
12273         pci_set_drvdata(pdev, NULL);
12274
12275 err_out:
12276         return rc;
12277 }
12278
12279 static void bnx2x_get_pcie_width_speed(struct bnx2x *bp, int *width,
12280                                        enum bnx2x_pci_bus_speed *speed)
12281 {
12282         u32 link_speed, val = 0;
12283
12284         pci_read_config_dword(bp->pdev, PCICFG_LINK_CONTROL, &val);
12285         *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
12286
12287         link_speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
12288
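        /* Link speed field encoding, per the switch below: 3 = 8.0 GT/s
         * (gen3), 2 = 5.0 GT/s (gen2), anything else is reported as
         * 2.5 GT/s (gen1).
         */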
12289         switch (link_speed) {
12290         case 3:
12291                 *speed = BNX2X_PCI_LINK_SPEED_8000;
12292                 break;
12293         case 2:
12294                 *speed = BNX2X_PCI_LINK_SPEED_5000;
12295                 break;
12296         default:
12297                 *speed = BNX2X_PCI_LINK_SPEED_2500;
12298         }
12299 }
12300
12301 static int bnx2x_check_firmware(struct bnx2x *bp)
12302 {
12303         const struct firmware *firmware = bp->firmware;
12304         struct bnx2x_fw_file_hdr *fw_hdr;
12305         struct bnx2x_fw_file_section *sections;
12306         u32 offset, len, num_ops;
12307         __be16 *ops_offsets;
12308         int i;
12309         const u8 *fw_ver;
12310
12311         if (firmware->size < sizeof(struct bnx2x_fw_file_hdr)) {
12312                 BNX2X_ERR("Wrong FW size\n");
12313                 return -EINVAL;
12314         }
12315
12316         fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
12317         sections = (struct bnx2x_fw_file_section *)fw_hdr;
12318
12319         /* Make sure none of the offsets and sizes make us read beyond
12320          * the end of the firmware data */
12321         for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
12322                 offset = be32_to_cpu(sections[i].offset);
12323                 len = be32_to_cpu(sections[i].len);
12324                 if (offset + len > firmware->size) {
12325                         BNX2X_ERR("Section %d length is out of bounds\n", i);
12326                         return -EINVAL;
12327                 }
12328         }
12329
12330         /* Likewise for the init_ops offsets */
12331         offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
12332         ops_offsets = (__force __be16 *)(firmware->data + offset);
12333         num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
12334
12335         for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
12336                 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
12337                         BNX2X_ERR("Section offset %d is out of bounds\n", i);
12338                         return -EINVAL;
12339                 }
12340         }
12341
12342         /* Check FW version */
12343         offset = be32_to_cpu(fw_hdr->fw_version.offset);
12344         fw_ver = firmware->data + offset;
12345         if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
12346             (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
12347             (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
12348             (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
12349                 BNX2X_ERR("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
12350                        fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
12351                        BCM_5710_FW_MAJOR_VERSION,
12352                        BCM_5710_FW_MINOR_VERSION,
12353                        BCM_5710_FW_REVISION_VERSION,
12354                        BCM_5710_FW_ENGINEERING_VERSION);
12355                 return -EINVAL;
12356         }
12357
12358         return 0;
12359 }
12360
12361 static void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
12362 {
12363         const __be32 *source = (const __be32 *)_source;
12364         u32 *target = (u32 *)_target;
12365         u32 i;
12366
12367         for (i = 0; i < n/4; i++)
12368                 target[i] = be32_to_cpu(source[i]);
12369 }
12370
12371 /* Ops array is stored in the following format:
12372  * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
12373  */
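/* Illustrative example: the big-endian word pair {0x02abcdef, 0x00000001}
 * decodes to op = 0x02, offset = 0xabcdef, raw_data = 0x1.
 */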
12375 static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
12376 {
12377         const __be32 *source = (const __be32 *)_source;
12378         struct raw_op *target = (struct raw_op *)_target;
12379         u32 i, j, tmp;
12380
12381         for (i = 0, j = 0; i < n/8; i++, j += 2) {
12382                 tmp = be32_to_cpu(source[j]);
12383                 target[i].op = (tmp >> 24) & 0xff;
12384                 target[i].offset = tmp & 0xffffff;
12385                 target[i].raw_data = be32_to_cpu(source[j + 1]);
12386         }
12387 }
12388
12389 /* IRO array is stored in the following format:
12390  * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
12391  */
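/* Each iro record thus occupies three big-endian 32-bit words: base,
 * then (m1 << 16 | m2), then (m3 << 16 | size). That is why j advances
 * by three per record below.
 */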
12392 static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
12393 {
12394         const __be32 *source = (const __be32 *)_source;
12395         struct iro *target = (struct iro *)_target;
12396         u32 i, j, tmp;
12397
12398         for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
12399                 target[i].base = be32_to_cpu(source[j]);
12400                 j++;
12401                 tmp = be32_to_cpu(source[j]);
12402                 target[i].m1 = (tmp >> 16) & 0xffff;
12403                 target[i].m2 = tmp & 0xffff;
12404                 j++;
12405                 tmp = be32_to_cpu(source[j]);
12406                 target[i].m3 = (tmp >> 16) & 0xffff;
12407                 target[i].size = tmp & 0xffff;
12408                 j++;
12409         }
12410 }
12411
12412 static void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
12413 {
12414         const __be16 *source = (const __be16 *)_source;
12415         u16 *target = (u16 *)_target;
12416         u32 i;
12417
12418         for (i = 0; i < n/2; i++)
12419                 target[i] = be16_to_cpu(source[i]);
12420 }
12421
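/* Allocate bp->arr and fill it by converting the matching firmware file
 * section from its big-endian on-file format to host order via 'func';
 * jump to 'lbl' for error unwinding if the allocation fails.
 */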
12422 #define BNX2X_ALLOC_AND_SET(arr, lbl, func)                             \
12423 do {                                                                    \
12424         u32 len = be32_to_cpu(fw_hdr->arr.len);                         \
12425         bp->arr = kmalloc(len, GFP_KERNEL);                             \
12426         if (!bp->arr)                                                   \
12427                 goto lbl;                                               \
12428         func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),      \
12429              (u8 *)bp->arr, len);                                       \
12430 } while (0)
12431
12432 static int bnx2x_init_firmware(struct bnx2x *bp)
12433 {
12434         const char *fw_file_name;
12435         struct bnx2x_fw_file_hdr *fw_hdr;
12436         int rc;
12437
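        /* Firmware sections are cached in bp across unload/load cycles;
         * nothing to do if they are already resident.
         */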
12438         if (bp->firmware)
12439                 return 0;
12440
12441         if (CHIP_IS_E1(bp))
12442                 fw_file_name = FW_FILE_NAME_E1;
12443         else if (CHIP_IS_E1H(bp))
12444                 fw_file_name = FW_FILE_NAME_E1H;
12445         else if (!CHIP_IS_E1x(bp))
12446                 fw_file_name = FW_FILE_NAME_E2;
12447         else {
12448                 BNX2X_ERR("Unsupported chip revision\n");
12449                 return -EINVAL;
12450         }
12451         BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
12452
12453         rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
12454         if (rc) {
12455                 BNX2X_ERR("Can't load firmware file %s\n",
12456                           fw_file_name);
12457                 goto request_firmware_exit;
12458         }
12459
12460         rc = bnx2x_check_firmware(bp);
12461         if (rc) {
12462                 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
12463                 goto request_firmware_exit;
12464         }
12465
12466         fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
12467
12468         /* Initialize the pointers to the init arrays */
12469         /* Blob */
12470         BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
12471
12472         /* Opcodes */
12473         BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
12474
12475         /* Offsets */
12476         BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
12477                             be16_to_cpu_n);
12478
12479         /* STORMs firmware */
12480         INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12481                         be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
12482         INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
12483                         be32_to_cpu(fw_hdr->tsem_pram_data.offset);
12484         INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12485                         be32_to_cpu(fw_hdr->usem_int_table_data.offset);
12486         INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
12487                         be32_to_cpu(fw_hdr->usem_pram_data.offset);
12488         INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12489                         be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
12490         INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
12491                         be32_to_cpu(fw_hdr->xsem_pram_data.offset);
12492         INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12493                         be32_to_cpu(fw_hdr->csem_int_table_data.offset);
12494         INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
12495                         be32_to_cpu(fw_hdr->csem_pram_data.offset);
12496         /* IRO */
12497         BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
12498
12499         return 0;
12500
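/* Error unwinding: free in the reverse order of allocation. */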
12501 iro_alloc_err:
12502         kfree(bp->init_ops_offsets);
12503 init_offsets_alloc_err:
12504         kfree(bp->init_ops);
12505 init_ops_alloc_err:
12506         kfree(bp->init_data);
12507 request_firmware_exit:
12508         release_firmware(bp->firmware);
12509         bp->firmware = NULL;
12510
12511         return rc;
12512 }
12513
12514 static void bnx2x_release_firmware(struct bnx2x *bp)
12515 {
12516         kfree(bp->init_ops_offsets);
12517         kfree(bp->init_ops);
12518         kfree(bp->init_data);
12519         release_firmware(bp->firmware);
12520         bp->firmware = NULL;
12521 }
12522
12523 static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = {
12524         .init_hw_cmn_chip = bnx2x_init_hw_common_chip,
12525         .init_hw_cmn      = bnx2x_init_hw_common,
12526         .init_hw_port     = bnx2x_init_hw_port,
12527         .init_hw_func     = bnx2x_init_hw_func,
12528
12529         .reset_hw_cmn     = bnx2x_reset_common,
12530         .reset_hw_port    = bnx2x_reset_port,
12531         .reset_hw_func    = bnx2x_reset_func,
12532
12533         .gunzip_init      = bnx2x_gunzip_init,
12534         .gunzip_end       = bnx2x_gunzip_end,
12535
12536         .init_fw          = bnx2x_init_firmware,
12537         .release_fw       = bnx2x_release_firmware,
12538 };
12539
12540 void bnx2x__init_func_obj(struct bnx2x *bp)
12541 {
12542         /* Prepare DMAE related driver resources */
12543         bnx2x_setup_dmae(bp);
12544
12545         bnx2x_init_func_obj(bp, &bp->func_obj,
12546                             bnx2x_sp(bp, func_rdata),
12547                             bnx2x_sp_mapping(bp, func_rdata),
12548                             bnx2x_sp(bp, func_afex_rdata),
12549                             bnx2x_sp_mapping(bp, func_afex_rdata),
12550                             &bnx2x_func_sp_drv);
12551 }
12552
12553 /* must be called after sriov-enable */
12554 static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
12555 {
12556         int cid_count = BNX2X_L2_MAX_CID(bp);
12557
12558         if (IS_SRIOV(bp))
12559                 cid_count += BNX2X_VF_CIDS;
12560
12561         if (CNIC_SUPPORT(bp))
12562                 cid_count += CNIC_CID_MAX;
12563
12564         return roundup(cid_count, QM_CID_ROUND);
12565 }
12566
12567 /**
12568  * bnx2x_get_num_non_def_sbs - return the number of non-default SBs
12569  *
12570  * @pdev:       pci device
12571  * @cnic_cnt:   number of SBs reserved for CNIC
12572  */
12573 static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt)
12574 {
12575         int index;
12576         u16 control = 0;
12577
12578         /*
12579          * If MSI-X is not supported, return the number of SBs needed to
12580          * support one fastpath queue plus the CNIC SBs: 1 + cnic_cnt.
12581          */
12582         if (!pdev->msix_cap) {
12583                 dev_info(&pdev->dev, "no msix capability found\n");
12584                 return 1 + cnic_cnt;
12585         }
12586         dev_info(&pdev->dev, "msix capability found\n");
12587
12588         /*
12589          * The value in the PCI configuration space is the index of the last
12590          * entry, namely one less than the actual size of the table, which is
12591          * exactly what we want to return from this function: the number of
12592          * all SBs without the default SB.
12593          * VFs have no default SB; the caller accounts for that by adding one.
12594          */
12595         pci_read_config_word(pdev, pdev->msix_cap + PCI_MSI_FLAGS, &control);
12596
12597         index = control & PCI_MSIX_FLAGS_QSIZE;
12598
12599         return index;
12600 }
12601
12602 static int set_max_cos_est(int chip_id)
12603 {
12604         switch (chip_id) {
12605         case BCM57710:
12606         case BCM57711:
12607         case BCM57711E:
12608                 return BNX2X_MULTI_TX_COS_E1X;
12609         case BCM57712:
12610         case BCM57712_MF:
12611         case BCM57712_VF:
12612                 return BNX2X_MULTI_TX_COS_E2_E3A0;
12613         case BCM57800:
12614         case BCM57800_MF:
12615         case BCM57800_VF:
12616         case BCM57810:
12617         case BCM57810_MF:
12618         case BCM57840_4_10:
12619         case BCM57840_2_20:
12620         case BCM57840_O:
12621         case BCM57840_MFO:
12622         case BCM57810_VF:
12623         case BCM57840_MF:
12624         case BCM57840_VF:
12625         case BCM57811:
12626         case BCM57811_MF:
12627         case BCM57811_VF:
12628                 return BNX2X_MULTI_TX_COS_E3B0;
12630         default:
12631                 pr_err("Unknown board_type (%d), aborting\n", chip_id);
12632                 return -ENODEV;
12633         }
12634 }
12635
12636 static int set_is_vf(int chip_id)
12637 {
12638         switch (chip_id) {
12639         case BCM57712_VF:
12640         case BCM57800_VF:
12641         case BCM57810_VF:
12642         case BCM57840_VF:
12643         case BCM57811_VF:
12644                 return true;
12645         default:
12646                 return false;
12647         }
12648 }
12649
12650 struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
12651
12652 static int bnx2x_init_one(struct pci_dev *pdev,
12653                                     const struct pci_device_id *ent)
12654 {
12655         struct net_device *dev = NULL;
12656         struct bnx2x *bp;
12657         int pcie_width;
12658         enum bnx2x_pci_bus_speed pcie_speed;
12659         int rc, max_non_def_sbs;
12660         int rx_count, tx_count, rss_count, doorbell_size;
12661         int max_cos_est;
12662         bool is_vf;
12663         int cnic_cnt;
12664
12665         /* An estimate of the maximum number of CoSes this chip may support,
12666          * according to the chip version. It is used to minimize the memory
12667          * allocated for Tx netdev_queues; the accurate value of bp->max_cos
12668          * is calculated in bnx2x_init_bp() based on the chip version and
12669          * chip revision.
12670          */
12673         max_cos_est = set_max_cos_est(ent->driver_data);
12674         if (max_cos_est < 0)
12675                 return max_cos_est;
12676         is_vf = set_is_vf(ent->driver_data);
12677         cnic_cnt = is_vf ? 0 : 1;
12678
12679         max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt);
12680
12681         /* add another SB for VF as it has no default SB */
12682         max_non_def_sbs += is_vf ? 1 : 0;
12683
12684         /* Maximum number of RSS queues: one IGU SB goes to CNIC */
12685         rss_count = max_non_def_sbs - cnic_cnt;
12686
12687         if (rss_count < 1)
12688                 return -EINVAL;
12689
12690         /* Maximum number of netdev Rx queues: RSS + FCoE L2 */
12691         rx_count = rss_count + cnic_cnt;
12692
12693         /* Maximum number of netdev Tx queues:
12694          * maximum TSS queues * maximum supported number of CoS + FCoE L2
12695          */
12696         tx_count = rss_count * max_cos_est + cnic_cnt;
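        /* Worked example for the computations above (illustrative): with 16
         * non-default SBs, CNIC support (cnic_cnt = 1) and max_cos_est = 3,
         * rss_count = 15, rx_count = 16 and tx_count = 46.
         */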
12697
12698         /* dev is zeroed by alloc_etherdev_mqs() */
12699         dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
12700         if (!dev)
12701                 return -ENOMEM;
12702
12703         bp = netdev_priv(dev);
12704
12705         bp->flags = 0;
12706         if (is_vf)
12707                 bp->flags |= IS_VF_FLAG;
12708
12709         bp->igu_sb_cnt = max_non_def_sbs;
12710         bp->igu_base_addr = IS_VF(bp) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM;
12711         bp->msg_enable = debug;
12712         bp->cnic_support = cnic_cnt;
12713         bp->cnic_probe = bnx2x_cnic_probe;
12714
12715         pci_set_drvdata(pdev, dev);
12716
12717         rc = bnx2x_init_dev(bp, pdev, dev, ent->driver_data);
12718         if (rc < 0) {
12719                 free_netdev(dev);
12720                 return rc;
12721         }
12722
12723         BNX2X_DEV_INFO("This is a %s function\n",
12724                        IS_PF(bp) ? "physical" : "virtual");
12725         BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? "on" : "off");
12726         BNX2X_DEV_INFO("Max num of status blocks %d\n", max_non_def_sbs);
12727         BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
12728                        tx_count, rx_count);
12729
12730         rc = bnx2x_init_bp(bp);
12731         if (rc)
12732                 goto init_one_exit;
12733
12734         /* Map doorbells here as we need the real value of bp->max_cos which
12735          * is initialized in bnx2x_init_bp() to determine the number of
12736          * l2 connections.
12737          */
12738         if (IS_VF(bp)) {
12739                 bp->doorbells = bnx2x_vf_doorbells(bp);
12740                 rc = bnx2x_vf_pci_alloc(bp);
12741                 if (rc)
12742                         goto init_one_exit;
12743         } else {
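                /* Each L2 connection owns a (1 << BNX2X_DB_SHIFT) byte
                 * doorbell slot in BAR 2; fail if the BAR cannot hold
                 * them all.
                 */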
12744                 doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
12745                 if (doorbell_size > pci_resource_len(pdev, 2)) {
12746                         dev_err(&bp->pdev->dev,
12747                                 "Cannot map doorbells, bar size too small, aborting\n");
12748                         rc = -ENOMEM;
12749                         goto init_one_exit;
12750                 }
12751                 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
12752                                                 doorbell_size);
12753         }
12754         if (!bp->doorbells) {
12755                 dev_err(&bp->pdev->dev,
12756                         "Cannot map doorbell space, aborting\n");
12757                 rc = -ENOMEM;
12758                 goto init_one_exit;
12759         }
12760
12761         if (IS_VF(bp)) {
12762                 rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
12763                 if (rc)
12764                         goto init_one_exit;
12765         }
12766
12767         /* Enable SRIOV if capability found in configuration space */
12768         rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
12769         if (rc)
12770                 goto init_one_exit;
12771
12772         /* calc qm_cid_count */
12773         bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
12774         BNX2X_DEV_INFO("qm_cid_count %d\n", bp->qm_cid_count);
12775
12776         /* disable FCOE L2 queue for E1x*/
12777         if (CHIP_IS_E1x(bp))
12778                 bp->flags |= NO_FCOE_FLAG;
12779
12780         /* Set bp->num_queues for MSI-X mode*/
12781         bnx2x_set_num_queues(bp);
12782
12783         /* Configure interrupt mode: try to enable MSI-X/MSI if
12784          * needed.
12785          */
12786         rc = bnx2x_set_int_mode(bp);
12787         if (rc) {
12788                 dev_err(&pdev->dev, "Cannot set interrupts\n");
12789                 goto init_one_exit;
12790         }
12791         BNX2X_DEV_INFO("set interrupts successfully\n");
12792
12793         /* register the net device */
12794         rc = register_netdev(dev);
12795         if (rc) {
12796                 dev_err(&pdev->dev, "Cannot register net device\n");
12797                 goto init_one_exit;
12798         }
12799         BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name);
12800
12801         if (!NO_FCOE(bp)) {
12802                 /* Add storage MAC address */
12803                 rtnl_lock();
12804                 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
12805                 rtnl_unlock();
12806         }
12807
12808         bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
12809         BNX2X_DEV_INFO("got pcie width %d and speed %d\n",
12810                        pcie_width, pcie_speed);
12811
12812         BNX2X_DEV_INFO("%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
12813                        board_info[ent->driver_data].name,
12814                        (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
12815                        pcie_width,
12816                        pcie_speed == BNX2X_PCI_LINK_SPEED_2500 ? "2.5GHz" :
12817                        pcie_speed == BNX2X_PCI_LINK_SPEED_5000 ? "5.0GHz" :
12818                        pcie_speed == BNX2X_PCI_LINK_SPEED_8000 ? "8.0GHz" :
12819                        "Unknown",
12820                        dev->base_addr, bp->pdev->irq, dev->dev_addr);
12821
12822         return 0;
12823
12824 init_one_exit:
12825         if (bp->regview)
12826                 iounmap(bp->regview);
12827
12828         if (IS_PF(bp) && bp->doorbells)
12829                 iounmap(bp->doorbells);
12830
12831         free_netdev(dev);
12832
12833         if (atomic_read(&pdev->enable_cnt) == 1)
12834                 pci_release_regions(pdev);
12835
12836         pci_disable_device(pdev);
12837         pci_set_drvdata(pdev, NULL);
12838
12839         return rc;
12840 }
12841
12842 static void __bnx2x_remove(struct pci_dev *pdev,
12843                            struct net_device *dev,
12844                            struct bnx2x *bp,
12845                            bool remove_netdev)
12846 {
12847         /* Delete storage MAC address */
12848         if (!NO_FCOE(bp)) {
12849                 rtnl_lock();
12850                 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
12851                 rtnl_unlock();
12852         }
12853
12854 #ifdef BCM_DCBNL
12855         /* Delete app tlvs from dcbnl */
12856         bnx2x_dcbnl_update_applist(bp, true);
12857 #endif
12858
12859         if (IS_PF(bp) &&
12860             !BP_NOMCP(bp) &&
12861             (bp->flags & BC_SUPPORTS_RMMOD_CMD))
12862                 bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0);
12863
12864         /* Close the interface - either directly or implicitly */
12865         if (remove_netdev) {
12866                 unregister_netdev(dev);
12867         } else {
12868                 rtnl_lock();
12869                 dev_close(dev);
12870                 rtnl_unlock();
12871         }
12872
12873         bnx2x_iov_remove_one(bp);
12874
12875         /* Power on: we can't let PCI layer write to us while we are in D3 */
12876         if (IS_PF(bp))
12877                 bnx2x_set_power_state(bp, PCI_D0);
12878
12879         /* Disable MSI/MSI-X */
12880         bnx2x_disable_msi(bp);
12881
12882         /* Power off */
12883         if (IS_PF(bp))
12884                 bnx2x_set_power_state(bp, PCI_D3hot);
12885
12886         /* Make sure RESET task is not scheduled before continuing */
12887         cancel_delayed_work_sync(&bp->sp_rtnl_task);
12888
12889         /* send message via vfpf channel to release the resources of this vf */
12890         if (IS_VF(bp))
12891                 bnx2x_vfpf_release(bp);
12892
12893         /* Assumes no further PCIe PM changes will occur */
12894         if (system_state == SYSTEM_POWER_OFF) {
12895                 pci_wake_from_d3(pdev, bp->wol);
12896                 pci_set_power_state(pdev, PCI_D3hot);
12897         }
12898
12899         if (bp->regview)
12900                 iounmap(bp->regview);
12901
12902         /* For a VF, the doorbells are part of the regview and were unmapped
12903          * along with it. FW is only loaded by the PF.
12904          */
12905         if (IS_PF(bp)) {
12906                 if (bp->doorbells)
12907                         iounmap(bp->doorbells);
12908
12909                 bnx2x_release_firmware(bp);
12910         }
12911         bnx2x_free_mem_bp(bp);
12912
12913         if (remove_netdev)
12914                 free_netdev(dev);
12915
12916         if (atomic_read(&pdev->enable_cnt) == 1)
12917                 pci_release_regions(pdev);
12918
12919         pci_disable_device(pdev);
12920         pci_set_drvdata(pdev, NULL);
12921 }
12922
12923 static void bnx2x_remove_one(struct pci_dev *pdev)
12924 {
12925         struct net_device *dev = pci_get_drvdata(pdev);
12926         struct bnx2x *bp;
12927
12928         if (!dev) {
12929                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
12930                 return;
12931         }
12932         bp = netdev_priv(dev);
12933
12934         __bnx2x_remove(pdev, dev, bp, true);
12935 }
12936
12937 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12938 {
12939         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
12940
12941         bp->rx_mode = BNX2X_RX_MODE_NONE;
12942
12943         if (CNIC_LOADED(bp))
12944                 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
12945
12946         /* Stop Tx */
12947         bnx2x_tx_disable(bp);
12948         /* Delete all NAPI objects */
12949         bnx2x_del_all_napi(bp);
12950         if (CNIC_LOADED(bp))
12951                 bnx2x_del_all_napi_cnic(bp);
12952         netdev_reset_tc(bp->dev);
12953
12954         del_timer_sync(&bp->timer);
12955         cancel_delayed_work(&bp->sp_task);
12956         cancel_delayed_work(&bp->period_task);
12957
12958         spin_lock_bh(&bp->stats_lock);
12959         bp->stats_state = STATS_STATE_DISABLED;
12960         spin_unlock_bh(&bp->stats_lock);
12961
12962         bnx2x_save_statistics(bp);
12963
12964         netif_carrier_off(bp->dev);
12965
12966         return 0;
12967 }
12968
12969 /**
12970  * bnx2x_io_error_detected - called when PCI error is detected
12971  * @pdev: Pointer to PCI device
12972  * @state: The current pci connection state
12973  *
12974  * This function is called after a PCI bus error affecting
12975  * this device has been detected.
12976  */
12977 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
12978                                                 pci_channel_state_t state)
12979 {
12980         struct net_device *dev = pci_get_drvdata(pdev);
12981         struct bnx2x *bp = netdev_priv(dev);
12982
12983         rtnl_lock();
12984
12985         BNX2X_ERR("IO error detected\n");
12986
12987         netif_device_detach(dev);
12988
12989         if (state == pci_channel_io_perm_failure) {
12990                 rtnl_unlock();
12991                 return PCI_ERS_RESULT_DISCONNECT;
12992         }
12993
12994         if (netif_running(dev))
12995                 bnx2x_eeh_nic_unload(bp);
12996
12997         bnx2x_prev_path_mark_eeh(bp);
12998
12999         pci_disable_device(pdev);
13000
13001         rtnl_unlock();
13002
13003         /* Request a slot reset */
13004         return PCI_ERS_RESULT_NEED_RESET;
13005 }
13006
13007 /**
13008  * bnx2x_io_slot_reset - called after the PCI bus has been reset
13009  * @pdev: Pointer to PCI device
13010  *
13011  * Restart the card from scratch, as if from a cold-boot.
13012  */
13013 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
13014 {
13015         struct net_device *dev = pci_get_drvdata(pdev);
13016         struct bnx2x *bp = netdev_priv(dev);
13017         int i;
13018
13019         rtnl_lock();
13020         BNX2X_ERR("IO slot reset initializing...\n");
13021         if (pci_enable_device(pdev)) {
13022                 dev_err(&pdev->dev,
13023                         "Cannot re-enable PCI device after reset\n");
13024                 rtnl_unlock();
13025                 return PCI_ERS_RESULT_DISCONNECT;
13026         }
13027
13028         pci_set_master(pdev);
13029         pci_restore_state(pdev);
13030         pci_save_state(pdev);
13031
13032         if (netif_running(dev))
13033                 bnx2x_set_power_state(bp, PCI_D0);
13034
13035         if (netif_running(dev)) {
13036                 BNX2X_ERR("IO slot reset --> driver unload\n");
13037
13038                 /* MCP should have been reset; need to wait for validity */
13039                 bnx2x_init_shmem(bp);
13040
13041                 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
13042                         u32 v;
13043
13044                         v = SHMEM2_RD(bp,
13045                                       drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
13046                         SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
13047                                   v & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
13048                 }
13049                 bnx2x_drain_tx_queues(bp);
13050                 bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
13051                 bnx2x_netif_stop(bp, 1);
13052                 bnx2x_free_irq(bp);
13053
13054                 /* Report UNLOAD_DONE to MCP */
13055                 bnx2x_send_unload_done(bp, true);
13056
13057                 bp->sp_state = 0;
13058                 bp->port.pmf = 0;
13059
13060                 bnx2x_prev_unload(bp);
13061
13062                 /* We should have reset the engine, so it's fair to assume
13063                  * that the FW will no longer write to the bnx2x driver.
13064                  */
13065                 bnx2x_squeeze_objects(bp);
13066                 bnx2x_free_skbs(bp);
13067                 for_each_rx_queue(bp, i)
13068                         bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
13069                 bnx2x_free_fp_mem(bp);
13070                 bnx2x_free_mem(bp);
13071
13072                 bp->state = BNX2X_STATE_CLOSED;
13073         }
13074
13075         rtnl_unlock();
13076
13077         return PCI_ERS_RESULT_RECOVERED;
13078 }
13079
13080 /**
13081  * bnx2x_io_resume - called when traffic can start flowing again
13082  * @pdev: Pointer to PCI device
13083  *
13084  * This callback is called when the error recovery driver tells us that
13085  * it's OK to resume normal operation.
13086  */
13087 static void bnx2x_io_resume(struct pci_dev *pdev)
13088 {
13089         struct net_device *dev = pci_get_drvdata(pdev);
13090         struct bnx2x *bp = netdev_priv(dev);
13091
13092         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13093                 netdev_err(bp->dev, "Handling parity error recovery. Try again later\n");
13094                 return;
13095         }
13096
13097         rtnl_lock();
13098
13099         bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
13100                                                         DRV_MSG_SEQ_NUMBER_MASK;
13101
13102         if (netif_running(dev))
13103                 bnx2x_nic_load(bp, LOAD_NORMAL);
13104
13105         netif_device_attach(dev);
13106
13107         rtnl_unlock();
13108 }
13109
13110 static const struct pci_error_handlers bnx2x_err_handler = {
13111         .error_detected = bnx2x_io_error_detected,
13112         .slot_reset     = bnx2x_io_slot_reset,
13113         .resume         = bnx2x_io_resume,
13114 };
13115
13116 static void bnx2x_shutdown(struct pci_dev *pdev)
13117 {
13118         struct net_device *dev = pci_get_drvdata(pdev);
13119         struct bnx2x *bp;
13120
13121         if (!dev)
13122                 return;
13123
13124         bp = netdev_priv(dev);
13125         if (!bp)
13126                 return;
13127
13128         rtnl_lock();
13129         netif_device_detach(dev);
13130         rtnl_unlock();
13131
13132         /* Don't remove the netdevice, as there are scenarios which will cause
13133          * the kernel to hang, e.g., when trying to remove bnx2i while the
13134          * rootfs is mounted from SAN.
13135          */
13136         __bnx2x_remove(pdev, dev, bp, false);
13137 }
13138
13139 static struct pci_driver bnx2x_pci_driver = {
13140         .name        = DRV_MODULE_NAME,
13141         .id_table    = bnx2x_pci_tbl,
13142         .probe       = bnx2x_init_one,
13143         .remove      = bnx2x_remove_one,
13144         .suspend     = bnx2x_suspend,
13145         .resume      = bnx2x_resume,
13146         .err_handler = &bnx2x_err_handler,
13147 #ifdef CONFIG_BNX2X_SRIOV
13148         .sriov_configure = bnx2x_sriov_configure,
13149 #endif
13150         .shutdown    = bnx2x_shutdown,
13151 };
13152
13153 static int __init bnx2x_init(void)
13154 {
13155         int ret;
13156
13157         pr_info("%s", version);
13158
13159         bnx2x_wq = create_singlethread_workqueue("bnx2x");
13160         if (bnx2x_wq == NULL) {
13161                 pr_err("Cannot create workqueue\n");
13162                 return -ENOMEM;
13163         }
13164
13165         ret = pci_register_driver(&bnx2x_pci_driver);
13166         if (ret) {
13167                 pr_err("Cannot register driver\n");
13168                 destroy_workqueue(bnx2x_wq);
13169         }
13170         return ret;
13171 }
13172
13173 static void __exit bnx2x_cleanup(void)
13174 {
13175         struct list_head *pos, *q;
13176
13177         pci_unregister_driver(&bnx2x_pci_driver);
13178
13179         destroy_workqueue(bnx2x_wq);
13180
13181         /* Free globally allocated resources */
13182         list_for_each_safe(pos, q, &bnx2x_prev_list) {
13183                 struct bnx2x_prev_path_list *tmp =
13184                         list_entry(pos, struct bnx2x_prev_path_list, list);
13185                 list_del(pos);
13186                 kfree(tmp);
13187         }
13188 }
13189
13190 void bnx2x_notify_link_changed(struct bnx2x *bp)
13191 {
13192         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1);
13193 }
13194
13195 module_init(bnx2x_init);
13196 module_exit(bnx2x_cleanup);
13197
13198 /**
13199  * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
13200  *
13201  * @bp:         driver handle
13203  *
13204  * This function waits until the ramrod completes.
13205  * Returns 0 on success, -ENODEV if the ramrod does not complete.
13206  */
13207 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
13208 {
13209         unsigned long ramrod_flags = 0;
13210
13211         __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
13212         return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac,
13213                                  &bp->iscsi_l2_mac_obj, true,
13214                                  BNX2X_ISCSI_ETH_MAC, &ramrod_flags);
13215 }
13216
13217 /* count denotes the number of new completions we have seen */
13218 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
13219 {
13220         struct eth_spe *spe;
13221         int cxt_index, cxt_offset;
13222
13223 #ifdef BNX2X_STOP_ON_ERROR
13224         if (unlikely(bp->panic))
13225                 return;
13226 #endif
13227
13228         spin_lock_bh(&bp->spq_lock);
13229         BUG_ON(bp->cnic_spq_pending < count);
13230         bp->cnic_spq_pending -= count;
13231
13232         for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
13233                 u16 type =  (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
13234                                 & SPE_HDR_CONN_TYPE) >>
13235                                 SPE_HDR_CONN_TYPE_SHIFT;
13236                 u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data)
13237                                 >> SPE_HDR_CMD_ID_SHIFT) & 0xff;
13238
13239                 /* Set validation for iSCSI L2 client before sending SETUP
13240                  * ramrod
13241                  */
13242                 if (type == ETH_CONNECTION_TYPE) {
13243                         if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) {
13244                                 cxt_index = BNX2X_ISCSI_ETH_CID(bp) /
13245                                         ILT_PAGE_CIDS;
13246                                 cxt_offset = BNX2X_ISCSI_ETH_CID(bp) -
13247                                         (cxt_index * ILT_PAGE_CIDS);
13248                                 bnx2x_set_ctx_validation(bp,
13249                                         &bp->context[cxt_index].
13250                                                          vcxt[cxt_offset].eth,
13251                                         BNX2X_ISCSI_ETH_CID(bp));
13252                         }
13253                 }
13254
13255                 /*
13256                  * No more than 8 L2 SPEs and no more than 8 L5 SPEs may be
13257                  * in flight at a time. We also check that the number of
13258                  * outstanding COMMON ramrods does not exceed what the EQ
13259                  * and SPQ can accommodate.
13260                  */
13261                 if (type == ETH_CONNECTION_TYPE) {
13262                         if (!atomic_read(&bp->cq_spq_left))
13263                                 break;
13264                         else
13265                                 atomic_dec(&bp->cq_spq_left);
13266                 } else if (type == NONE_CONNECTION_TYPE) {
13267                         if (!atomic_read(&bp->eq_spq_left))
13268                                 break;
13269                         else
13270                                 atomic_dec(&bp->eq_spq_left);
13271                 } else if ((type == ISCSI_CONNECTION_TYPE) ||
13272                            (type == FCOE_CONNECTION_TYPE)) {
13273                         if (bp->cnic_spq_pending >=
13274                             bp->cnic_eth_dev.max_kwqe_pending)
13275                                 break;
13276                         else
13277                                 bp->cnic_spq_pending++;
13278                 } else {
13279                         BNX2X_ERR("Unknown SPE type: %d\n", type);
13280                         bnx2x_panic();
13281                         break;
13282                 }
13283
13284                 spe = bnx2x_sp_get_next(bp);
13285                 *spe = *bp->cnic_kwq_cons;
13286
13287                 DP(BNX2X_MSG_SP, "pending on SPQ %d, on KWQ %d count %d\n",
13288                    bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
13289
13290                 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
13291                         bp->cnic_kwq_cons = bp->cnic_kwq;
13292                 else
13293                         bp->cnic_kwq_cons++;
13294         }
13295         bnx2x_sp_prod_update(bp);
13296         spin_unlock_bh(&bp->spq_lock);
13297 }
13298
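/* Entry point used by CNIC to submit up to @count 16-byte kwqes. The
 * entries are copied into the driver's internal kwq ring under spq_lock
 * and drained to the slowpath queue as credit allows; the return value
 * is the number of kwqes actually accepted. A caller on the CNIC side
 * might use it roughly like this (illustrative sketch, not taken from
 * this driver):
 *
 *      submitted = cp->drv_submit_kwqes_16(dev, kwqes, n);
 *      if (submitted < n)
 *              ...retry the remaining kwqes later...
 */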
static int bnx2x_cnic_sp_queue(struct net_device *dev,
                               struct kwqe_16 *kwqes[], u32 count)
{
        struct bnx2x *bp = netdev_priv(dev);
        int i;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic)) {
                BNX2X_ERR("Can't post to SP queue while panic\n");
                return -EIO;
        }
#endif

        if ((bp->recovery_state != BNX2X_RECOVERY_DONE) &&
            (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
                BNX2X_ERR("Handling parity error recovery. Try again later\n");
                return -EAGAIN;
        }

        spin_lock_bh(&bp->spq_lock);

        for (i = 0; i < count; i++) {
                struct eth_spe *spe = (struct eth_spe *)kwqes[i];

                if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
                        break;

                *bp->cnic_kwq_prod = *spe;

                bp->cnic_kwq_pending++;

                DP(BNX2X_MSG_SP, "L5 SPQE %x %x %x:%x pos %d\n",
                   spe->hdr.conn_and_cmd_data, spe->hdr.type,
                   spe->data.update_data_addr.hi,
                   spe->data.update_data_addr.lo,
                   bp->cnic_kwq_pending);

                if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
                        bp->cnic_kwq_prod = bp->cnic_kwq;
                else
                        bp->cnic_kwq_prod++;
        }

        spin_unlock_bh(&bp->spq_lock);

        if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
                bnx2x_cnic_sp_post(bp, 0);

        return i;
}

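/* Deliver a control event to the registered CNIC driver. Takes
 * cnic_mutex (and may therefore sleep), so this variant is for process
 * context only.
 */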
static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
        struct cnic_ops *c_ops;
        int rc = 0;

        mutex_lock(&bp->cnic_mutex);
        c_ops = rcu_dereference_protected(bp->cnic_ops,
                                          lockdep_is_held(&bp->cnic_mutex));
        if (c_ops)
                rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
        mutex_unlock(&bp->cnic_mutex);

        return rc;
}

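/* Like bnx2x_cnic_ctl_send(), but safe in BH context: the ops pointer is
 * sampled under rcu_read_lock() instead of taking the mutex.
 */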
static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
        struct cnic_ops *c_ops;
        int rc = 0;

        rcu_read_lock();
        c_ops = rcu_dereference(bp->cnic_ops);
        if (c_ops)
                rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
        rcu_read_unlock();

        return rc;
}

/*
 * for commands that have no data
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
        struct cnic_ctl_info ctl = {0};

        ctl.cmd = cmd;

        return bnx2x_cnic_ctl_send(bp, &ctl);
}

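/* Report a completion (cid plus error status) to CNIC before counting it
 * as done, then re-run the slowpath post routine so queued kwqes can make
 * progress on the freed credit.
 */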
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err)
{
        struct cnic_ctl_info ctl = {0};

        /* first we tell CNIC and only then we count this as a completion */
        ctl.cmd = CNIC_CTL_COMPLETION_CMD;
        ctl.data.comp.cid = cid;
        ctl.data.comp.error = err;

        bnx2x_cnic_ctl_send_bh(bp, &ctl);
        bnx2x_cnic_sp_post(bp, 0);
}

/* Called with netif_addr_lock_bh() taken.
 * Sets an rx_mode config for an iSCSI ETH client.
 * Doesn't block.
 * Completion should be checked outside.
 */
static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start)
{
        unsigned long accept_flags = 0, ramrod_flags = 0;
        u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
        int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED;

        if (start) {
                /* Start accepting on iSCSI L2 ring. Accept all multicasts
                 * because it's the only way for the UIO Queue to accept
                 * multicasts (in non-promiscuous mode only one Queue per
                 * function, the leading one in our case, will receive
                 * multicast packets).
                 */
                __set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags);
                __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags);
                __set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags);
                __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);

                /* Clear STOP_PENDING bit if START is requested */
                clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state);

                sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED;
        } else {
                /* Clear START_PENDING bit if STOP is requested */
                clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state);
        }

        if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
                set_bit(sched_state, &bp->sp_state);
        } else {
                __set_bit(RAMROD_RX, &ramrod_flags);
                bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0,
                                    ramrod_flags);
        }
}

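/* Dispatcher for control commands the CNIC driver issues through
 * cnic_eth_dev->drv_ctl: ILT context-table writes, SPQ/EQ credit
 * returns, iSCSI L2 client start/stop and, on E3 chips, ULP capability
 * (un)registration in shmem2.
 */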
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
        struct bnx2x *bp = netdev_priv(dev);
        int rc = 0;

        switch (ctl->cmd) {
        case DRV_CTL_CTXTBL_WR_CMD: {
                u32 index = ctl->data.io.offset;
                dma_addr_t addr = ctl->data.io.dma_addr;

                bnx2x_ilt_wr(bp, index, addr);
                break;
        }

        case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
                int count = ctl->data.credit.credit_count;

                bnx2x_cnic_sp_post(bp, count);
                break;
        }

        /* rtnl_lock is held. */
        case DRV_CTL_START_L2_CMD: {
                struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
                unsigned long sp_bits = 0;

                /* Configure the iSCSI classification object */
                bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj,
                                   cp->iscsi_l2_client_id,
                                   cp->iscsi_l2_cid, BP_FUNC(bp),
                                   bnx2x_sp(bp, mac_rdata),
                                   bnx2x_sp_mapping(bp, mac_rdata),
                                   BNX2X_FILTER_MAC_PENDING,
                                   &bp->sp_state, BNX2X_OBJ_TYPE_RX,
                                   &bp->macs_pool);

                /* Set iSCSI MAC address */
                rc = bnx2x_set_iscsi_eth_mac_addr(bp);
                if (rc)
                        break;

                mmiowb();
                barrier();

                /* Start accepting on iSCSI L2 ring */

                netif_addr_lock_bh(dev);
                bnx2x_set_iscsi_eth_rx_mode(bp, true);
                netif_addr_unlock_bh(dev);

                /* bits to wait on */
                __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
                __set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits);

                if (!bnx2x_wait_sp_comp(bp, sp_bits))
                        BNX2X_ERR("rx_mode completion timed out!\n");

                break;
        }

        /* rtnl_lock is held. */
        case DRV_CTL_STOP_L2_CMD: {
                unsigned long sp_bits = 0;

                /* Stop accepting on iSCSI L2 ring */
                netif_addr_lock_bh(dev);
                bnx2x_set_iscsi_eth_rx_mode(bp, false);
                netif_addr_unlock_bh(dev);

                /* bits to wait on */
                __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
                __set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits);

                if (!bnx2x_wait_sp_comp(bp, sp_bits))
                        BNX2X_ERR("rx_mode completion timed out!\n");

                mmiowb();
                barrier();

                /* Unset iSCSI L2 MAC */
                rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj,
                                        BNX2X_ISCSI_ETH_MAC, true);
                break;
        }
        case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
                int count = ctl->data.credit.credit_count;

                smp_mb__before_atomic_inc();
                atomic_add(count, &bp->cq_spq_left);
                smp_mb__after_atomic_inc();
                break;
        }
        case DRV_CTL_ULP_REGISTER_CMD: {
                int ulp_type = ctl->data.register_data.ulp_type;

                if (CHIP_IS_E3(bp)) {
                        int idx = BP_FW_MB_IDX(bp);
                        u32 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
                        int path = BP_PATH(bp);
                        int port = BP_PORT(bp);
                        int i;
                        u32 scratch_offset;
                        u32 *host_addr;

                        /* first write capability to shmem2 */
                        if (ulp_type == CNIC_ULP_ISCSI)
                                cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
                        else if (ulp_type == CNIC_ULP_FCOE)
                                cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
                        SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);

                        if ((ulp_type != CNIC_ULP_FCOE) ||
                            (!SHMEM2_HAS(bp, ncsi_oem_data_addr)) ||
                            (!(bp->flags & BC_SUPPORTS_FCOE_FEATURES)))
                                break;

                        /* if reached here - should write fcoe capabilities */
                        scratch_offset = SHMEM2_RD(bp, ncsi_oem_data_addr);
                        if (!scratch_offset)
                                break;
                        scratch_offset += offsetof(struct glob_ncsi_oem_data,
                                                   fcoe_features[path][port]);
                        host_addr = (u32 *) &(ctl->data.register_data.
                                              fcoe_features);
                        for (i = 0; i < sizeof(struct fcoe_capabilities);
                             i += 4)
                                REG_WR(bp, scratch_offset + i,
                                       *(host_addr + i/4));
                }
                break;
        }

        case DRV_CTL_ULP_UNREGISTER_CMD: {
                int ulp_type = ctl->data.ulp_type;

                if (CHIP_IS_E3(bp)) {
                        int idx = BP_FW_MB_IDX(bp);
                        u32 cap;

                        cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
                        if (ulp_type == CNIC_ULP_ISCSI)
                                cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
                        else if (ulp_type == CNIC_ULP_FCOE)
                                cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
                        SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
                }
                break;
        }

        default:
                BNX2X_ERR("unknown command %x\n", ctl->cmd);
                rc = -EINVAL;
        }

        return rc;
}

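/* Describe to CNIC the interrupt resources it should use: MSI-X vector 1
 * when MSI-X is enabled, plus the CNIC status block and the default
 * status block.
 */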
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        if (bp->flags & USING_MSIX_FLAG) {
                cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
                cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
                cp->irq_arr[0].vector = bp->msix_table[1].vector;
        } else {
                cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
                cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
        }
        if (!CHIP_IS_E1x(bp))
                cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
        else
                cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;

        cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp);
        cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp);
        cp->irq_arr[1].status_blk = bp->def_status_blk;
        cp->irq_arr[1].status_blk_num = DEF_SB_ID;
        cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;

        cp->num_irq = 2;
}

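/* Refresh the CID-related fields of cnic_eth_dev. Presumably needed
 * whenever the L2 CID layout (and hence the ILT lines) may have changed,
 * e.g. across a reload with a different queue count.
 */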
void bnx2x_setup_cnic_info(struct bnx2x *bp)
{
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
                             bnx2x_cid_ilt_lines(bp);
        cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
        cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
        cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);

        DP(NETIF_MSG_IFUP, "BNX2X_1st_NON_L2_ETH_CID(bp) %x, cp->starting_cid %x, cp->fcoe_init_cid %x, cp->iscsi_l2_cid %x\n",
           BNX2X_1st_NON_L2_ETH_CID(bp), cp->starting_cid, cp->fcoe_init_cid,
           cp->iscsi_l2_cid);

        if (NO_ISCSI_OOO(bp))
                cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
}

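/* cnic_eth_dev->drv_register_cnic callback: the CNIC module attaches its
 * ops and private data here. Loads CNIC-related HW state if it isn't
 * loaded yet, allocates one page for the kwq ring and finally publishes
 * the ops pointer via RCU.
 */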
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
                               void *data)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
        int rc;

        DP(NETIF_MSG_IFUP, "Register_cnic called\n");

        if (ops == NULL) {
                BNX2X_ERR("NULL ops received\n");
                return -EINVAL;
        }

        if (!CNIC_SUPPORT(bp)) {
                BNX2X_ERR("Can't register CNIC when not supported\n");
                return -EOPNOTSUPP;
        }

        if (!CNIC_LOADED(bp)) {
                rc = bnx2x_load_cnic(bp);
                if (rc) {
                        BNX2X_ERR("CNIC-related load failed\n");
                        return rc;
                }
        }

        bp->cnic_enabled = true;

        bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!bp->cnic_kwq)
                return -ENOMEM;

        bp->cnic_kwq_cons = bp->cnic_kwq;
        bp->cnic_kwq_prod = bp->cnic_kwq;
        bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

        bp->cnic_spq_pending = 0;
        bp->cnic_kwq_pending = 0;

        bp->cnic_data = data;

        cp->num_irq = 0;
        cp->drv_state |= CNIC_DRV_STATE_REGD;
        cp->iro_arr = bp->iro_arr;

        bnx2x_setup_cnic_irq_info(bp);

        rcu_assign_pointer(bp->cnic_ops, ops);

        return 0;
}

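/* cnic_eth_dev->drv_unregister_cnic callback: undo the registration
 * above. synchronize_rcu() guarantees no reader still holds cnic_ops
 * before the kwq ring is freed.
 */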
static int bnx2x_unregister_cnic(struct net_device *dev)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        mutex_lock(&bp->cnic_mutex);
        cp->drv_state = 0;
        RCU_INIT_POINTER(bp->cnic_ops, NULL);
        mutex_unlock(&bp->cnic_mutex);
        synchronize_rcu();
        bp->cnic_enabled = false;
        kfree(bp->cnic_kwq);
        bp->cnic_kwq = NULL;

        return 0;
}

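/* Build and export the cnic_eth_dev descriptor through which the CNIC
 * module drives this device; returns NULL when both iSCSI and FCoE are
 * disabled so that CNIC skips the device entirely. Illustrative use from
 * the CNIC side (hypothetical names, sketch only):
 *
 *      struct cnic_eth_dev *ethdev = bnx2x_cnic_probe(netdev);
 *
 *      if (ethdev)
 *              ethdev->drv_register_cnic(netdev, my_ops, my_data);
 */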
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        /* If both iSCSI and FCoE are disabled - return NULL in
         * order to indicate to CNIC that it should not try to work
         * with this device.
         */
        if (NO_ISCSI(bp) && NO_FCOE(bp))
                return NULL;

        cp->drv_owner = THIS_MODULE;
        cp->chip_id = CHIP_ID(bp);
        cp->pdev = bp->pdev;
        cp->io_base = bp->regview;
        cp->io_base2 = bp->doorbells;
        cp->max_kwqe_pending = 8;
        cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
        cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
                             bnx2x_cid_ilt_lines(bp);
        cp->ctx_tbl_len = CNIC_ILT_LINES;
        cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
        cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
        cp->drv_ctl = bnx2x_drv_ctl;
        cp->drv_register_cnic = bnx2x_register_cnic;
        cp->drv_unregister_cnic = bnx2x_unregister_cnic;
        cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
        cp->iscsi_l2_client_id =
                bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
        cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);

        if (NO_ISCSI_OOO(bp))
                cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;

        if (NO_ISCSI(bp))
                cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;

        if (NO_FCOE(bp))
                cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;

        BNX2X_DEV_INFO(
                "page_size %d, tbl_offset %d, tbl_lines %d, starting cid %d\n",
                cp->ctx_blk_size,
                cp->ctx_tbl_offset,
                cp->ctx_tbl_len,
                cp->starting_cid);
        return cp;
}

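/* Return the BAR offset of the USTORM rx producers for a fastpath queue:
 * VFs use their own helper, while PFs pick the E2 or E1x formula based on
 * the chip revision.
 */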
u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
{
        struct bnx2x *bp = fp->bp;
        u32 offset = BAR_USTRORM_INTMEM;

        if (IS_VF(bp))
                return bnx2x_vf_ustorm_prods_offset(bp, fp);
        else if (!CHIP_IS_E1x(bp))
                offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
        else
                offset += USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);

        return offset;
}

/* Called only on E1H or E2.
 * When pretending to be a PF, the pretend value is the function number
 * (0..7); when pretending to be a VF, it is the PF-num:VF-valid:ABS-VFID
 * combination.
 */
int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
{
        u32 pretend_reg;

        if (CHIP_IS_E1H(bp) && pretend_func_val >= E1H_FUNC_MAX)
                return -1;

        /* get my own pretend register */
        pretend_reg = bnx2x_get_pretend_reg(bp);
        REG_WR(bp, pretend_reg, pretend_func_val);
        REG_RD(bp, pretend_reg);        /* read back to flush the write */
        return 0;
}