/*
 * Source: git.kernelconcepts.de (karo-tx-linux.git)
 * Path:   drivers/net/ethernet/broadcom/tg3.c
 * Commit: "tg3: Move tg3_set_rx_mode"
 */
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2011 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47
48 #include <net/checksum.h>
49 #include <net/ip.h>
50
51 #include <asm/system.h>
52 #include <linux/io.h>
53 #include <asm/byteorder.h>
54 #include <linux/uaccess.h>
55
56 #ifdef CONFIG_SPARC
57 #include <asm/idprom.h>
58 #include <asm/prom.h>
59 #endif
60
61 #define BAR_0   0
62 #define BAR_2   2
63
64 #include "tg3.h"
65
66 /* Functions & macros to verify TG3_FLAGS types */
67
/* Return non-zero if @flag is set in the driver flag bitmap @bits. */
68 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
69 {
70         return test_bit(flag, bits);
71 }
72
/* Set @flag in the driver flag bitmap @bits (atomic bitop). */
73 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
74 {
75         set_bit(flag, bits);
76 }
77
/* Clear @flag in the driver flag bitmap @bits (atomic bitop). */
78 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
79 {
80         clear_bit(flag, bits);
81 }
82
/* Convenience wrappers that test/set/clear TG3_FLAG_* bits in tp->tg3_flags. */
83 #define tg3_flag(tp, flag)                              \
84         _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
85 #define tg3_flag_set(tp, flag)                          \
86         _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
87 #define tg3_flag_clear(tp, flag)                        \
88         _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
89
/* Driver identity and version strings. */
90 #define DRV_MODULE_NAME         "tg3"
91 #define TG3_MAJ_NUM                     3
92 #define TG3_MIN_NUM                     122
93 #define DRV_MODULE_VERSION      \
94         __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
95 #define DRV_MODULE_RELDATE      "December 7, 2011"
96
/* Reset reasons passed to the firmware/APE state-change helpers. */
97 #define RESET_KIND_SHUTDOWN     0
98 #define RESET_KIND_INIT         1
99 #define RESET_KIND_SUSPEND      2
100
/* Default RX/TX mode register values and netif message-enable mask. */
101 #define TG3_DEF_RX_MODE         0
102 #define TG3_DEF_TX_MODE         0
103 #define TG3_DEF_MSG_ENABLE        \
104         (NETIF_MSG_DRV          | \
105          NETIF_MSG_PROBE        | \
106          NETIF_MSG_LINK         | \
107          NETIF_MSG_TIMER        | \
108          NETIF_MSG_IFDOWN       | \
109          NETIF_MSG_IFUP         | \
110          NETIF_MSG_RX_ERR       | \
111          NETIF_MSG_TX_ERR)
112
/* Delay (usec) after toggling the GRC local-control power switch. */
113 #define TG3_GRC_LCLCTL_PWRSW_DELAY      100
114
115 /* length of time before we decide the hardware is borked,
116  * and dev->tx_timeout() should be called to fix the problem
117  */
118
119 #define TG3_TX_TIMEOUT                  (5 * HZ)
120
121 /* hardware minimum and maximum for a single frame's data payload */
122 #define TG3_MIN_MTU                     60
123 #define TG3_MAX_MTU(tp) \
124         (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
125
126 /* These numbers seem to be hard coded in the NIC firmware somehow.
127  * You can't change the ring sizes, but you can change where you place
128  * them in the NIC onboard memory.
129  */
130 #define TG3_RX_STD_RING_SIZE(tp) \
131         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
132          TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
133 #define TG3_DEF_RX_RING_PENDING         200
134 #define TG3_RX_JMB_RING_SIZE(tp) \
135         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
136          TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
137 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
138
139 /* Do not place this n-ring entries value into the tp struct itself,
140  * we really want to expose these constants to GCC so that modulo et
141  * al.  operations are done with shifts and masks instead of with
142  * hw multiply/modulo instructions.  Another solution would be to
143  * replace things like '% foo' with '& (foo - 1)'.
144  */
145
146 #define TG3_TX_RING_SIZE                512
147 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
148
/* Byte sizes of the descriptor rings, derived from ring entry counts. */
149 #define TG3_RX_STD_RING_BYTES(tp) \
150         (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
151 #define TG3_RX_JMB_RING_BYTES(tp) \
152         (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
153 #define TG3_RX_RCB_RING_BYTES(tp) \
154         (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
155 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
156                                  TG3_TX_RING_SIZE)
/* Advance a TX ring index with power-of-two wraparound. */
157 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
158
159 #define TG3_DMA_BYTE_ENAB               64
160
/* RX buffer DMA sizes in bytes (standard and jumbo rings). */
161 #define TG3_RX_STD_DMA_SZ               1536
162 #define TG3_RX_JMB_DMA_SZ               9046
163
164 #define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)
165
166 #define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
167 #define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
168
169 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
170         (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
171
172 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
173         (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
174
175 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
176  * that are at least dword aligned when used in PCIX mode.  The driver
177  * works around this bug by double copying the packet.  This workaround
178  * is built into the normal double copy length check for efficiency.
179  *
180  * However, the double copy is only necessary on those architectures
181  * where unaligned memory accesses are inefficient.  For those architectures
182  * where unaligned memory accesses incur little penalty, we can reintegrate
183  * the 5701 in the normal rx path.  Doing so saves a device structure
184  * dereference by hardcoding the double copy threshold in place.
185  */
186 #define TG3_RX_COPY_THRESHOLD           256
187 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
188         #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
189 #else
190         #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
191 #endif
192
193 #if (NET_IP_ALIGN != 0)
194 #define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
195 #else
196 #define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
197 #endif
198
199 /* minimum number of free TX descriptors required to wake up TX process */
200 #define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
201 #define TG3_TX_BD_DMA_MAX_2K            2048
202 #define TG3_TX_BD_DMA_MAX_4K            4096
203
204 #define TG3_RAW_IP_ALIGN 2
205
206 #define TG3_FW_UPDATE_TIMEOUT_SEC       5
207
/* Firmware image names requested via the firmware loader. */
208 #define FIRMWARE_TG3            "tigon/tg3.bin"
209 #define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
210 #define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"
211
/* Banner string printed once at driver load/probe time. */
212 static char version[] __devinitdata =
213         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
214
215 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
216 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
217 MODULE_LICENSE("GPL");
218 MODULE_VERSION(DRV_MODULE_VERSION);
219 MODULE_FIRMWARE(FIRMWARE_TG3);
220 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
221 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
222
/* Debug message bitmap; overridable at load time via the tg3_debug param. */
223 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
224 module_param(tg3_debug, int, 0);
225 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
226
/* PCI vendor/device IDs claimed by this driver; terminated by an empty
 * entry.  Exported via MODULE_DEVICE_TABLE for hotplug/modalias matching.
 */
227 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
228         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
229         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
230         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
231         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
232         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
233         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
234         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
235         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
236         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
237         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
238         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
239         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
240         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
241         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
242         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
243         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
244         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
245         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
246         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
247         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
248         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
249         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
250         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
251         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
252         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
253         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
254         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
255         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
256         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
257         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
258         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
259         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
260         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
261         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
262         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
263         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
264         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
265         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
266         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
267         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
268         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
269         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
270         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
271         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
272         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
273         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
274         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
275         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
276         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
277         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
278         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
279         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
280         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
281         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
282         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
283         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
284         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
285         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
286         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
287         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
288         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
289         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
290         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
291         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
292         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
293         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
294         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
295         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
296         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
297         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
298         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
299         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
300         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
301         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
302         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
303         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
304         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
305         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
306         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
307         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
308         {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
309         {}
310 };
311
312 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
313
/* Statistic names reported via "ethtool -S".
 * NOTE(review): the entry order appears to mirror the hardware statistics
 * block layout used when filling values — verify against tg3.h before
 * inserting or reordering entries.
 */
314 static const struct {
315         const char string[ETH_GSTRING_LEN];
316 } ethtool_stats_keys[] = {
317         { "rx_octets" },
318         { "rx_fragments" },
319         { "rx_ucast_packets" },
320         { "rx_mcast_packets" },
321         { "rx_bcast_packets" },
322         { "rx_fcs_errors" },
323         { "rx_align_errors" },
324         { "rx_xon_pause_rcvd" },
325         { "rx_xoff_pause_rcvd" },
326         { "rx_mac_ctrl_rcvd" },
327         { "rx_xoff_entered" },
328         { "rx_frame_too_long_errors" },
329         { "rx_jabbers" },
330         { "rx_undersize_packets" },
331         { "rx_in_length_errors" },
332         { "rx_out_length_errors" },
333         { "rx_64_or_less_octet_packets" },
334         { "rx_65_to_127_octet_packets" },
335         { "rx_128_to_255_octet_packets" },
336         { "rx_256_to_511_octet_packets" },
337         { "rx_512_to_1023_octet_packets" },
338         { "rx_1024_to_1522_octet_packets" },
339         { "rx_1523_to_2047_octet_packets" },
340         { "rx_2048_to_4095_octet_packets" },
341         { "rx_4096_to_8191_octet_packets" },
342         { "rx_8192_to_9022_octet_packets" },
343
344         { "tx_octets" },
345         { "tx_collisions" },
346
347         { "tx_xon_sent" },
348         { "tx_xoff_sent" },
349         { "tx_flow_control" },
350         { "tx_mac_errors" },
351         { "tx_single_collisions" },
352         { "tx_mult_collisions" },
353         { "tx_deferred" },
354         { "tx_excessive_collisions" },
355         { "tx_late_collisions" },
356         { "tx_collide_2times" },
357         { "tx_collide_3times" },
358         { "tx_collide_4times" },
359         { "tx_collide_5times" },
360         { "tx_collide_6times" },
361         { "tx_collide_7times" },
362         { "tx_collide_8times" },
363         { "tx_collide_9times" },
364         { "tx_collide_10times" },
365         { "tx_collide_11times" },
366         { "tx_collide_12times" },
367         { "tx_collide_13times" },
368         { "tx_collide_14times" },
369         { "tx_collide_15times" },
370         { "tx_ucast_packets" },
371         { "tx_mcast_packets" },
372         { "tx_bcast_packets" },
373         { "tx_carrier_sense_errors" },
374         { "tx_discards" },
375         { "tx_errors" },
376
377         { "dma_writeq_full" },
378         { "dma_write_prioq_full" },
379         { "rxbds_empty" },
380         { "rx_discards" },
381         { "rx_errors" },
382         { "rx_threshold_hit" },
383
384         { "dma_readq_full" },
385         { "dma_read_prioq_full" },
386         { "tx_comp_queue_full" },
387
388         { "ring_set_send_prod_index" },
389         { "ring_status_update" },
390         { "nic_irqs" },
391         { "nic_avoided_irqs" },
392         { "nic_tx_threshold_hit" },
393
394         { "mbuf_lwm_thresh_hit" },
395 };
396
/* Number of statistics exported to ethtool. */
397 #define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
398
399
/* Labels for the ethtool self-test results.
 * NOTE(review): presumably indexed by the self-test routine in the same
 * order — verify before inserting or reordering entries.
 */
400 static const struct {
401         const char string[ETH_GSTRING_LEN];
402 } ethtool_test_keys[] = {
403         { "nvram test        (online) " },
404         { "link test         (online) " },
405         { "register test     (offline)" },
406         { "memory test       (offline)" },
407         { "mac loopback test (offline)" },
408         { "phy loopback test (offline)" },
409         { "ext loopback test (offline)" },
410         { "interrupt test    (offline)" },
411 };
412
/* Number of self-tests exported to ethtool. */
413 #define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)
414
415
/* Post a 32-bit write to device register @off via the primary MMIO window. */
416 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
417 {
418         writel(val, tp->regs + off);
419 }
420
/* Read a 32-bit device register at @off via the primary MMIO window. */
421 static u32 tg3_read32(struct tg3 *tp, u32 off)
422 {
423         return readl(tp->regs + off);
424 }
425
/* Write a 32-bit value to APE register @off (separate APE MMIO mapping). */
426 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
427 {
428         writel(val, tp->aperegs + off);
429 }
430
/* Read a 32-bit APE register at @off (separate APE MMIO mapping). */
431 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
432 {
433         return readl(tp->aperegs + off);
434 }
435
/* Write register @off indirectly through PCI config space: set the
 * register window base, then write the data.  indirect_lock serializes
 * the two config-space accesses against concurrent users of the window.
 */
436 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
437 {
438         unsigned long flags;
439
440         spin_lock_irqsave(&tp->indirect_lock, flags);
441         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
442         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
443         spin_unlock_irqrestore(&tp->indirect_lock, flags);
444 }
445
/* MMIO write followed by a read-back of the same register to flush the
 * posted write before returning.
 */
446 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
447 {
448         writel(val, tp->regs + off);
449         readl(tp->regs + off);
450 }
451
/* Read register @off indirectly through PCI config space (window base +
 * data), serialized by indirect_lock.  Counterpart of
 * tg3_write_indirect_reg32().
 */
452 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
453 {
454         unsigned long flags;
455         u32 val;
456
457         spin_lock_irqsave(&tp->indirect_lock, flags);
458         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
459         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
460         spin_unlock_irqrestore(&tp->indirect_lock, flags);
461         return val;
462 }
463
/* Write a mailbox register indirectly through PCI config space.
 * Two mailboxes (RX return consumer index, standard ring producer
 * index) have dedicated config-space shadow registers and bypass the
 * generic window; all others go through the register window at the
 * mailbox alias offset (+0x5600).
 */
464 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
465 {
466         unsigned long flags;
467
468         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
469                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
470                                        TG3_64BIT_REG_LOW, val);
471                 return;
472         }
473         if (off == TG3_RX_STD_PROD_IDX_REG) {
474                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
475                                        TG3_64BIT_REG_LOW, val);
476                 return;
477         }
478
479         spin_lock_irqsave(&tp->indirect_lock, flags);
480         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
481         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
482         spin_unlock_irqrestore(&tp->indirect_lock, flags);
483
484         /* In indirect mode when disabling interrupts, we also need
485          * to clear the interrupt bit in the GRC local ctrl register.
486          */
487         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
488             (val == 0x1)) {
489                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
490                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
491         }
492 }
493
/* Read a mailbox register indirectly through PCI config space, using the
 * mailbox alias offset (+0x5600) of the register window, under
 * indirect_lock.
 */
494 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
495 {
496         unsigned long flags;
497         u32 val;
498
499         spin_lock_irqsave(&tp->indirect_lock, flags);
500         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
501         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
502         spin_unlock_irqrestore(&tp->indirect_lock, flags);
503         return val;
504 }
505
506 /* usec_wait specifies the wait time in usec when writing to certain registers
507  * where it is unsafe to read back the register without some delay.
508  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
509  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
510  */
511 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
512 {
513         if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
514                 /* Non-posted methods */
515                 tp->write32(tp, off, val);
516         else {
517                 /* Posted method */
518                 tg3_write32(tp, off, val);
519                 if (usec_wait)
520                         udelay(usec_wait);
                        /* Read back to flush the posted write. */
521                 tp->read32(tp, off);
522         }
523         /* Wait again after the read for the posted method to guarantee that
524          * the wait time is met.
525          */
526         if (usec_wait)
527                 udelay(usec_wait);
528 }
529
/* Mailbox write with an optional read-back flush; the read-back is
 * skipped when write reordering is allowed or the ICH workaround is
 * active.
 */
530 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
531 {
532         tp->write32_mbox(tp, off, val);
533         if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
534                 tp->read32_mbox(tp, off);
535 }
536
/* TX mailbox write: the value is written twice when the TXD mailbox
 * hardware bug workaround is enabled, and read back when write
 * reordering must be flushed.
 */
537 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
538 {
539         void __iomem *mbox = tp->regs + off;
540         writel(val, mbox);
541         if (tg3_flag(tp, TXD_MBOX_HWBUG))
542                 writel(val, mbox);
543         if (tg3_flag(tp, MBOX_WRITE_REORDER))
544                 readl(mbox);
545 }
546
/* 5906 mailbox read: mailboxes live at an offset from GRCMBOX_BASE. */
547 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
548 {
549         return readl(tp->regs + off + GRCMBOX_BASE);
550 }
551
/* 5906 mailbox write: mailboxes live at an offset from GRCMBOX_BASE. */
552 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
553 {
554         writel(val, tp->regs + off + GRCMBOX_BASE);
555 }
556
/* Register/mailbox access shorthands; these dispatch through the method
 * pointers in struct tg3 so the same call sites work for direct MMIO and
 * the indirect (config-space) access variants.
 */
557 #define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
558 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
559 #define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
560 #define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
561 #define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)
562
563 #define tw32(reg, val)                  tp->write32(tp, reg, val)
564 #define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
565 #define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
566 #define tr32(reg)                       tp->read32(tp, reg)
567
/* Write one word into NIC on-chip SRAM at @off through the memory window,
 * under indirect_lock.  On 5906, writes into the statistics block range
 * are silently skipped.  The window base is always reset to zero
 * afterwards.
 */
568 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
569 {
570         unsigned long flags;
571
572         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
573             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
574                 return;
575
576         spin_lock_irqsave(&tp->indirect_lock, flags);
577         if (tg3_flag(tp, SRAM_USE_CONFIG)) {
578                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
579                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
580
581                 /* Always leave this as zero. */
582                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
583         } else {
584                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
585                 tw32_f(TG3PCI_MEM_WIN_DATA, val);
586
587                 /* Always leave this as zero. */
588                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
589         }
590         spin_unlock_irqrestore(&tp->indirect_lock, flags);
591 }
592
/* Read one word of NIC on-chip SRAM at @off into @val through the memory
 * window, under indirect_lock.  On 5906, reads from the statistics block
 * range return 0.  The window base is always reset to zero afterwards.
 */
593 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
594 {
595         unsigned long flags;
596
597         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
598             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
599                 *val = 0;
600                 return;
601         }
602
603         spin_lock_irqsave(&tp->indirect_lock, flags);
604         if (tg3_flag(tp, SRAM_USE_CONFIG)) {
605                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
606                 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
607
608                 /* Always leave this as zero. */
609                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
610         } else {
611                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
612                 *val = tr32(TG3PCI_MEM_WIN_DATA);
613
614                 /* Always leave this as zero. */
615                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
616         }
617         spin_unlock_irqrestore(&tp->indirect_lock, flags);
618 }
619
/* Release any APE locks this driver instance might still hold from a
 * previous run.  PHY locks are always released with the driver grant
 * bit; other locks use a per-PCI-function grant bit (function 0 uses
 * the driver bit).  5761 uses the legacy grant register block.
 */
620 static void tg3_ape_lock_init(struct tg3 *tp)
621 {
622         int i;
623         u32 regbase, bit;
624
625         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
626                 regbase = TG3_APE_LOCK_GRANT;
627         else
628                 regbase = TG3_APE_PER_LOCK_GRANT;
629
630         /* Make sure the driver hasn't any stale locks. */
631         for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
632                 switch (i) {
633                 case TG3_APE_LOCK_PHY0:
634                 case TG3_APE_LOCK_PHY1:
635                 case TG3_APE_LOCK_PHY2:
636                 case TG3_APE_LOCK_PHY3:
637                         bit = APE_LOCK_GRANT_DRIVER;
638                         break;
639                 default:
640                         if (!tp->pci_fn)
641                                 bit = APE_LOCK_GRANT_DRIVER;
642                         else
643                                 bit = 1 << tp->pci_fn;
644                 }
                /* Writing the grant bit releases lock @i. */
645                 tg3_ape_write32(tp, regbase + 4 * i, bit);
646         }
647
648 }
649
/* Acquire APE lock @locknum by writing our request bit and polling the
 * grant register for up to ~1 ms.  Returns 0 on success (or when APE is
 * not enabled), -EBUSY if the lock could not be obtained (the request is
 * revoked), -EINVAL for an unsupported lock number.
 */
650 static int tg3_ape_lock(struct tg3 *tp, int locknum)
651 {
652         int i, off;
653         int ret = 0;
654         u32 status, req, gnt, bit;
655
656         if (!tg3_flag(tp, ENABLE_APE))
657                 return 0;
658
659         switch (locknum) {
660         case TG3_APE_LOCK_GPIO:
                /* 5761 has no GPIO lock; everything else shares the
                 * per-function request bit below.
                 */
661                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
662                         return 0;
                /* fall through */
663         case TG3_APE_LOCK_GRC:
664         case TG3_APE_LOCK_MEM:
665                 if (!tp->pci_fn)
666                         bit = APE_LOCK_REQ_DRIVER;
667                 else
668                         bit = 1 << tp->pci_fn;
669                 break;
670         default:
671                 return -EINVAL;
672         }
673
674         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
675                 req = TG3_APE_LOCK_REQ;
676                 gnt = TG3_APE_LOCK_GRANT;
677         } else {
678                 req = TG3_APE_PER_LOCK_REQ;
679                 gnt = TG3_APE_PER_LOCK_GRANT;
680         }
681
682         off = 4 * locknum;
683
684         tg3_ape_write32(tp, req + off, bit);
685
686         /* Wait for up to 1 millisecond to acquire lock. */
687         for (i = 0; i < 100; i++) {
688                 status = tg3_ape_read32(tp, gnt + off);
689                 if (status == bit)
690                         break;
691                 udelay(10);
692         }
693
694         if (status != bit) {
695                 /* Revoke the lock request. */
696                 tg3_ape_write32(tp, gnt + off, bit);
697                 ret = -EBUSY;
698         }
699
700         return ret;
701 }
702
/* Release APE lock @locknum by writing our grant bit back.  No-op when
 * APE is not enabled, for the GPIO lock on 5761, or for unknown lock
 * numbers.  Counterpart of tg3_ape_lock().
 */
703 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
704 {
705         u32 gnt, bit;
706
707         if (!tg3_flag(tp, ENABLE_APE))
708                 return;
709
710         switch (locknum) {
711         case TG3_APE_LOCK_GPIO:
712                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
713                         return;
                /* fall through */
714         case TG3_APE_LOCK_GRC:
715         case TG3_APE_LOCK_MEM:
716                 if (!tp->pci_fn)
717                         bit = APE_LOCK_GRANT_DRIVER;
718                 else
719                         bit = 1 << tp->pci_fn;
720                 break;
721         default:
722                 return;
723         }
724
725         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
726                 gnt = TG3_APE_LOCK_GRANT;
727         else
728                 gnt = TG3_APE_PER_LOCK_GRANT;
729
730         tg3_ape_write32(tp, gnt + 4 * locknum, bit);
731 }
732
/* Post @event to the APE firmware event-status mailbox and ring the
 * doorbell.  Bails out silently if the firmware uses NCSI, is not
 * signature-valid, or is not ready; gives up if a previous event stays
 * pending for ~1 ms.  The event-status update is done under the APE
 * memory lock.
 */
733 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
734 {
735         int i;
736         u32 apedata;
737
738         /* NCSI does not support APE events */
739         if (tg3_flag(tp, APE_HAS_NCSI))
740                 return;
741
742         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
743         if (apedata != APE_SEG_SIG_MAGIC)
744                 return;
745
746         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
747         if (!(apedata & APE_FW_STATUS_READY))
748                 return;
749
750         /* Wait for up to 1 millisecond for APE to service previous event. */
751         for (i = 0; i < 10; i++) {
752                 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
753                         return;
754
755                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
756
757                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
758                         tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
759                                         event | APE_EVENT_STATUS_EVENT_PENDING);
760
761                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
762
763                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
764                         break;
765
766                 udelay(100);
767         }
768
        /* Ring the doorbell only if our event was actually queued. */
769         if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
770                 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
771 }
772
/* Mirror the driver's lifecycle state (@kind is a RESET_KIND_* code)
 * into the APE management firmware's shared registers and notify the
 * APE with the matching state-change event.  No-op unless the APE is
 * enabled; unrecognized @kind values are ignored.
 */
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		/* Publish the host segment signature/length, bump the
		 * init counter, and identify this driver to the APE.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		/* Advertise WoL link-speed handling when WoL is armed. */
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
829
/* Mask chip interrupts: set the PCI INT mask bit in MISC_HOST_CTRL and
 * write 1 to every interrupt mailbox so no vector fires until
 * tg3_enable_ints() re-arms them.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
839
/* Unmask chip interrupts and re-arm every active vector.  Clears the
 * PCI INT mask bit, writes each vector's last_tag back to its mailbox,
 * and finally forces an initial interrupt (or kicks host coalescing)
 * so any already-pending status is not lost.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();	/* order irq_sync clearing before the unmask writes below */

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		/* In 1-shot MSI mode the mailbox is written a second time. */
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	/* Drop the first two vectors' coal bits from the saved mask. */
	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
870
871 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
872 {
873         struct tg3 *tp = tnapi->tp;
874         struct tg3_hw_status *sblk = tnapi->hw_status;
875         unsigned int work_exists = 0;
876
877         /* check for phy events */
878         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
879                 if (sblk->status & SD_STATUS_LINK_CHG)
880                         work_exists = 1;
881         }
882         /* check for RX/TX work to do */
883         if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
884             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
885                 work_exists = 1;
886
887         return work_exists;
888 }
889
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	/* Acknowledge completed work up to last_tag and re-arm the vector. */
	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
910
/* Reprogram TG3PCI_CLOCK_CTRL down to its base configuration, keeping
 * only the CLKRUN control bits and the low divider field.  Chips with
 * a CPMU or of the 5780 class manage clocks themselves, so this is a
 * no-op there.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	/* Keep CLKRUN control plus the low 5 divider bits only. */
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Two-step transition through ALTCLK before dropping to
		 * the final value (presumably required by the hardware
		 * clock-switch sequencing — each write waits 40 usec).
		 */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
943
/* Maximum number of MI_COM busy polls (10 usec apart) before an MDIO
 * transaction is declared failed.
 */
#define PHY_BUSY_LOOPS  5000

/* Read PHY register @reg into *@val over the MAC's MI (MDIO)
 * interface.  MI auto-polling, if active, is suspended for the
 * duration of the transaction and restored before returning.
 * Returns 0 on success, -EBUSY if the interface never went idle.
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	/* Build the MI frame: PHY address, register number, read cmd. */
	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			/* Re-read after a short settle delay to pick up
			 * the final data value.
			 */
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		/* Restore auto-polling. */
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
994
/* Write @val to PHY register @reg over the MAC's MI (MDIO) interface.
 * FET-style PHYs silently skip MII_CTRL1000 and MII_TG3_AUX_CTRL
 * writes (returned as success).  MI auto-polling is suspended for the
 * transaction and restored afterwards.  Returns 0 on success, -EBUSY
 * if the interface never went idle.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* Build the MI frame: PHY address, register, data, write cmd. */
	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		/* Restore auto-polling. */
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
1043
1044 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1045 {
1046         int err;
1047
1048         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1049         if (err)
1050                 goto done;
1051
1052         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1053         if (err)
1054                 goto done;
1055
1056         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1057                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1058         if (err)
1059                 goto done;
1060
1061         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1062
1063 done:
1064         return err;
1065 }
1066
1067 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1068 {
1069         int err;
1070
1071         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1072         if (err)
1073                 goto done;
1074
1075         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1076         if (err)
1077                 goto done;
1078
1079         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1080                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1081         if (err)
1082                 goto done;
1083
1084         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1085
1086 done:
1087         return err;
1088 }
1089
1090 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1091 {
1092         int err;
1093
1094         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1095         if (!err)
1096                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1097
1098         return err;
1099 }
1100
1101 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1102 {
1103         int err;
1104
1105         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1106         if (!err)
1107                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1108
1109         return err;
1110 }
1111
1112 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1113 {
1114         int err;
1115
1116         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1117                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1118                            MII_TG3_AUXCTL_SHDWSEL_MISC);
1119         if (!err)
1120                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1121
1122         return err;
1123 }
1124
1125 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1126 {
1127         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1128                 set |= MII_TG3_AUXCTL_MISC_WREN;
1129
1130         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1131 }
1132
/* Enable shadow-mode DSP access via the AUXCTL shadow register,
 * keeping TX at 6dB coding.  Expands to a tg3_phy_auxctl_write() call
 * so it can be used like a function (and its return value checked).
 */
#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

/* Disable shadow-mode DSP access.  Bug fix: the expansion previously
 * ended with a stray semicolon, which made the macro unusable in
 * expression context and unsafe in an unbraced if/else (the extra ';'
 * would terminate the if-branch).  It now expands to a plain call
 * expression, matching TG3_PHY_AUXCTL_SMDSP_ENABLE.
 */
#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)
1141
/* Software-reset the PHY via BMCR_RESET and poll (up to 5000 * 10us)
 * until the self-clearing reset bit drops.  Returns 0 on success,
 * -EBUSY on MDIO failure or timeout.
 */
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}
1172
1173 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1174 {
1175         struct tg3 *tp = bp->priv;
1176         u32 val;
1177
1178         spin_lock_bh(&tp->lock);
1179
1180         if (tg3_readphy(tp, reg, &val))
1181                 val = -EIO;
1182
1183         spin_unlock_bh(&tp->lock);
1184
1185         return val;
1186 }
1187
1188 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1189 {
1190         struct tg3 *tp = bp->priv;
1191         u32 ret = 0;
1192
1193         spin_lock_bh(&tp->lock);
1194
1195         if (tg3_writephy(tp, reg, val))
1196                 ret = -EIO;
1197
1198         spin_unlock_bh(&tp->lock);
1199
1200         return ret;
1201 }
1202
/* mii_bus ->reset hook: intentionally a no-op — PHY resets are issued
 * by the driver itself (see tg3_bmcr_reset()).
 */
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
1207
/* Program the MAC's PHY-interface registers (MAC_PHYCFG1/2 and, for
 * RGMII PHYs, MAC_EXT_RGMII_MODE) to match the PHY model detected on
 * the 5785's MDIO bus.  PHY ids without a known LED-mode mapping are
 * left untouched.
 */
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	/* Pick the LED-mode value for this PHY model. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	/* Non-RGMII interface: just set LED modes and clock timeouts. */
	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	/* RGMII with in-band status: enable the full set of in-band
	 * signalling masks.
	 */
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	/* Clear all RGMII-mode bits, then re-enable the ones matching
	 * the external in-band RX/TX flags.
	 */
	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
1288
/* Take over the MDIO interface by turning off MI auto-polling, then
 * reapply the 5785 PHY-interface setup if the MDIO bus is already
 * registered.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
1299
/* Determine this port's PHY address (5717-plus parts use one address
 * per PCI function, offset by 7 for serdes ports), start the MDIO
 * interface, and — when phylib is in use and not yet initialized —
 * allocate and register an mii_bus, then apply per-PHY-model
 * interface/dev_flags settings.  Returns 0 on success or a negative
 * errno.
 */
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		/* 5717 A0 reports serdes status via a CPMU strap
		 * instead of SG_DIG_STATUS.
		 */
		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	/* Per-PHY-model interface mode and feature flags. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
1404
1405 static void tg3_mdio_fini(struct tg3 *tp)
1406 {
1407         if (tg3_flag(tp, MDIOBUS_INITED)) {
1408                 tg3_flag_clear(tp, MDIOBUS_INITED);
1409                 mdiobus_unregister(tp->mdio_bus);
1410                 mdiobus_free(tp->mdio_bus);
1411         }
1412 }
1413
/* tp->lock is held. */
/* Raise a driver event toward the firmware by setting
 * GRC_RX_CPU_DRIVER_EVENT, and remember when it was raised so
 * tg3_wait_for_event_ack() can bound its wait.
 */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
1425
/* Longest time (usec) to wait for the firmware to ack a driver event. */
#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
/* Wait until the firmware clears GRC_RX_CPU_DRIVER_EVENT for the most
 * recently generated event, bounded by TG3_FW_EVENT_TIMEOUT_USEC
 * measured from tp->last_event_jiffies.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	/* Poll in ~8 usec steps. */
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
1454
/* tp->lock is held. */
/* Copy the PHY link registers (BMCR/BMSR, ADVERTISE/LPA, 1000T
 * ctrl/status, PHYADDR) into the firmware command mailbox area and
 * raise a LINK_UPDATE event so management (UMP/ASF) firmware tracks
 * link state.  Only applies to 5780-class chips with ASF enabled.
 */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	/* NOTE(review): length 14 while four 4-byte words follow below —
	 * confirm against the firmware mailbox interface.
	 */
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	/* Word 0: BMCR in the high half, BMSR in the low half. */
	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	/* Word 1: local advertisement / link partner ability. */
	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	/* Word 2: 1000BASE-T control/status (copper PHYs only). */
	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	/* Word 3: PHY address register. */
	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}
1501
/* tp->lock is held. */
/* Ask the ASF firmware to pause (FWCMD_NICDRV_PAUSE_FW) and wait for
 * its acknowledgement.  Skipped entirely when firmware runs on the
 * APE rather than the RX CPU.
 */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
1517
/* tp->lock is held. */
/* Announce an imminent reset of kind @kind to the firmware: write the
 * MAGIC1 pre-reset signature, record the driver state in SRAM when
 * the new ASF handshake is in effect, and (for INIT/SUSPEND) inform
 * the APE as well.
 */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}
1550
/* tp->lock is held. */
/* Record the "done" driver state for @kind after a reset (new ASF
 * handshake only), and inform the APE on shutdown.
 */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}
1574
/* tp->lock is held. */
/* Legacy ASF handshake: record the driver state for @kind in SRAM.
 * Unlike tg3_write_sig_pre_reset(), no pre-reset signature is written
 * and the APE is not notified.
 */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
1600
/* Wait for bootcode/firmware to finish initializing after a reset.
 * 5906 parts signal completion through VCPU_STATUS (polled up to
 * 20ms, hard failure on timeout); other parts flip the SRAM firmware
 * mailbox to ~MAGIC1 (polled up to ~1s).  Absent firmware on the
 * latter is reported once but tolerated, since some Sun onboard parts
 * legitimately ship without it.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
1644
/* Log the current link state — speed, duplex, flow control and (for
 * EEE-capable PHYs) EEE status — through the netif message helpers,
 * and forward the state to management firmware via
 * tg3_ump_link_report().
 */
static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}
}
1672
1673 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1674 {
1675         u16 miireg;
1676
1677         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1678                 miireg = ADVERTISE_1000XPAUSE;
1679         else if (flow_ctrl & FLOW_CTRL_TX)
1680                 miireg = ADVERTISE_1000XPSE_ASYM;
1681         else if (flow_ctrl & FLOW_CTRL_RX)
1682                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1683         else
1684                 miireg = 0;
1685
1686         return miireg;
1687 }
1688
1689 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1690 {
1691         u8 cap = 0;
1692
1693         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1694                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1695         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1696                 if (lcladv & ADVERTISE_1000XPAUSE)
1697                         cap = FLOW_CTRL_RX;
1698                 if (rmtadv & ADVERTISE_1000XPAUSE)
1699                         cap = FLOW_CTRL_TX;
1700         }
1701
1702         return cap;
1703 }
1704
1705 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1706 {
1707         u8 autoneg;
1708         u8 flowctrl = 0;
1709         u32 old_rx_mode = tp->rx_mode;
1710         u32 old_tx_mode = tp->tx_mode;
1711
1712         if (tg3_flag(tp, USE_PHYLIB))
1713                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1714         else
1715                 autoneg = tp->link_config.autoneg;
1716
1717         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1718                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1719                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1720                 else
1721                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1722         } else
1723                 flowctrl = tp->link_config.flowctrl;
1724
1725         tp->link_config.active_flowctrl = flowctrl;
1726
1727         if (flowctrl & FLOW_CTRL_RX)
1728                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1729         else
1730                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1731
1732         if (old_rx_mode != tp->rx_mode)
1733                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1734
1735         if (flowctrl & FLOW_CTRL_TX)
1736                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1737         else
1738                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1739
1740         if (old_tx_mode != tp->tx_mode)
1741                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1742 }
1743
/* PHYLIB link-change callback (registered via phy_connect() in
 * tg3_phy_init()).  Re-programs the MAC to match the PHY's current
 * speed/duplex, resolves flow control, and logs the result when any
 * link parameter changed.
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	/* Start from the current MAC mode with port-mode and
	 * half-duplex bits cleared; they are recomputed below.
	 */
	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		/* 10/100 use MII port mode; gigabit uses GMII.  The
		 * 5785 is special-cased: only SPEED_1000 gets GMII.
		 */
		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Full duplex: build pause advertisements from
			 * our configured flow control and the partner's
			 * reported pause/asym_pause bits.
			 */
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	/* Only touch MAC_MODE when something actually changed. */
	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	/* 1000 Mbps half duplex uses a longer slot time (0xff vs 32). */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Report only when up/down state, speed, duplex or flow
	 * control actually changed.
	 */
	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	/* Log outside tp->lock. */
	if (linkmesg)
		tg3_link_report(tp);
}
1827
1828 static int tg3_phy_init(struct tg3 *tp)
1829 {
1830         struct phy_device *phydev;
1831
1832         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1833                 return 0;
1834
1835         /* Bring the PHY back to a known state. */
1836         tg3_bmcr_reset(tp);
1837
1838         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1839
1840         /* Attach the MAC to the PHY. */
1841         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1842                              phydev->dev_flags, phydev->interface);
1843         if (IS_ERR(phydev)) {
1844                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1845                 return PTR_ERR(phydev);
1846         }
1847
1848         /* Mask with MAC supported features. */
1849         switch (phydev->interface) {
1850         case PHY_INTERFACE_MODE_GMII:
1851         case PHY_INTERFACE_MODE_RGMII:
1852                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1853                         phydev->supported &= (PHY_GBIT_FEATURES |
1854                                               SUPPORTED_Pause |
1855                                               SUPPORTED_Asym_Pause);
1856                         break;
1857                 }
1858                 /* fallthru */
1859         case PHY_INTERFACE_MODE_MII:
1860                 phydev->supported &= (PHY_BASIC_FEATURES |
1861                                       SUPPORTED_Pause |
1862                                       SUPPORTED_Asym_Pause);
1863                 break;
1864         default:
1865                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1866                 return -EINVAL;
1867         }
1868
1869         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1870
1871         phydev->advertising = phydev->supported;
1872
1873         return 0;
1874 }
1875
1876 static void tg3_phy_start(struct tg3 *tp)
1877 {
1878         struct phy_device *phydev;
1879
1880         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1881                 return;
1882
1883         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1884
1885         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1886                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1887                 phydev->speed = tp->link_config.orig_speed;
1888                 phydev->duplex = tp->link_config.orig_duplex;
1889                 phydev->autoneg = tp->link_config.orig_autoneg;
1890                 phydev->advertising = tp->link_config.orig_advertising;
1891         }
1892
1893         phy_start(phydev);
1894
1895         phy_start_aneg(phydev);
1896 }
1897
1898 static void tg3_phy_stop(struct tg3 *tp)
1899 {
1900         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1901                 return;
1902
1903         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1904 }
1905
1906 static void tg3_phy_fini(struct tg3 *tp)
1907 {
1908         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1909                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1910                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1911         }
1912 }
1913
1914 static int tg3_phy_set_extloopbk(struct tg3 *tp)
1915 {
1916         int err;
1917         u32 val;
1918
1919         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
1920                 return 0;
1921
1922         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1923                 /* Cannot do read-modify-write on 5401 */
1924                 err = tg3_phy_auxctl_write(tp,
1925                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1926                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
1927                                            0x4c20);
1928                 goto done;
1929         }
1930
1931         err = tg3_phy_auxctl_read(tp,
1932                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1933         if (err)
1934                 return err;
1935
1936         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
1937         err = tg3_phy_auxctl_write(tp,
1938                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
1939
1940 done:
1941         return err;
1942 }
1943
1944 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1945 {
1946         u32 phytest;
1947
1948         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1949                 u32 phy;
1950
1951                 tg3_writephy(tp, MII_TG3_FET_TEST,
1952                              phytest | MII_TG3_FET_SHADOW_EN);
1953                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1954                         if (enable)
1955                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1956                         else
1957                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1958                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1959                 }
1960                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1961         }
1962 }
1963
1964 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1965 {
1966         u32 reg;
1967
1968         if (!tg3_flag(tp, 5705_PLUS) ||
1969             (tg3_flag(tp, 5717_PLUS) &&
1970              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1971                 return;
1972
1973         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1974                 tg3_phy_fet_toggle_apd(tp, enable);
1975                 return;
1976         }
1977
1978         reg = MII_TG3_MISC_SHDW_WREN |
1979               MII_TG3_MISC_SHDW_SCR5_SEL |
1980               MII_TG3_MISC_SHDW_SCR5_LPED |
1981               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1982               MII_TG3_MISC_SHDW_SCR5_SDTL |
1983               MII_TG3_MISC_SHDW_SCR5_C125OE;
1984         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1985                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1986
1987         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1988
1989
1990         reg = MII_TG3_MISC_SHDW_WREN |
1991               MII_TG3_MISC_SHDW_APD_SEL |
1992               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1993         if (enable)
1994                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1995
1996         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1997 }
1998
1999 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2000 {
2001         u32 phy;
2002
2003         if (!tg3_flag(tp, 5705_PLUS) ||
2004             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2005                 return;
2006
2007         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2008                 u32 ephy;
2009
2010                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2011                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2012
2013                         tg3_writephy(tp, MII_TG3_FET_TEST,
2014                                      ephy | MII_TG3_FET_SHADOW_EN);
2015                         if (!tg3_readphy(tp, reg, &phy)) {
2016                                 if (enable)
2017                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2018                                 else
2019                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2020                                 tg3_writephy(tp, reg, phy);
2021                         }
2022                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2023                 }
2024         } else {
2025                 int ret;
2026
2027                 ret = tg3_phy_auxctl_read(tp,
2028                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2029                 if (!ret) {
2030                         if (enable)
2031                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2032                         else
2033                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2034                         tg3_phy_auxctl_write(tp,
2035                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2036                 }
2037         }
2038 }
2039
2040 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2041 {
2042         int ret;
2043         u32 val;
2044
2045         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2046                 return;
2047
2048         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2049         if (!ret)
2050                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2051                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2052 }
2053
/* Program PHY DSP coefficients from the chip's one-time-programmable
 * (OTP) value cached in tp->phy_otp.  Each field is extracted with its
 * TG3_OTP_*_MASK/SHIFT pair and written to the matching DSP register.
 * No-op when no OTP value is present or the SMDSP cannot be enabled.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* A nonzero return means the SMDSP could not be enabled
	 * (consistent with how the macro is used elsewhere in this
	 * file); skip programming entirely in that case.
	 */
	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
		return;

	/* AGC target (TAP1). */
	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	/* High-pass filter / override bits. */
	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	/* Low-pass filter disable plus ADC clock adjust. */
	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	/* VDAC trim. */
	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	/* 10BASE-T amplitude. */
	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	/* Resistor offset trims. */
	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
}
2090
/* Update Energy Efficient Ethernet state after a link change.
 *
 * @current_link_up: 1 when the caller has resolved the link as up.
 *
 * When the autonegotiated partner supports EEE at the current speed
 * (100TX or 1000T, full duplex), sets tp->setlpicnt to 2; otherwise
 * clears the LPI-enable bit in the CPMU EEE mode register.
 */
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		/* The LPI exit timer depends on link speed. */
		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		/* Read the clause-45 EEE resolution status to learn
		 * what the link partner resolved.
		 */
		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		/* No EEE partner (or link down): clear DSP TAP26 and
		 * disable LPI.  Note TG3_PHY_AUXCTL_SMDSP_ENABLE() in
		 * the condition has the side effect of enabling the
		 * SMDSP when it succeeds (returns 0).
		 */
		if (current_link_up == 1 &&
		   !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
2133
2134 static void tg3_phy_eee_enable(struct tg3 *tp)
2135 {
2136         u32 val;
2137
2138         if (tp->link_config.active_speed == SPEED_1000 &&
2139             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2140              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2141              tg3_flag(tp, 57765_CLASS)) &&
2142             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2143                 val = MII_TG3_DSP_TAP26_ALNOKO |
2144                       MII_TG3_DSP_TAP26_RMRXSTO;
2145                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2146                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2147         }
2148
2149         val = tr32(TG3_CPMU_EEE_MODE);
2150         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2151 }
2152
2153 static int tg3_wait_macro_done(struct tg3 *tp)
2154 {
2155         int limit = 100;
2156
2157         while (limit--) {
2158                 u32 tmp32;
2159
2160                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2161                         if ((tmp32 & 0x1000) == 0)
2162                                 break;
2163                 }
2164         }
2165         if (limit < 0)
2166                 return -EBUSY;
2167
2168         return 0;
2169 }
2170
/* Write a known test pattern into each of the four PHY DSP channels
 * and read it back to verify the DSP is functioning.
 *
 * Returns 0 on success, -EBUSY on macro timeout or pattern mismatch.
 * On a macro timeout *resetp is set to 1 so the caller
 * (tg3_phy_reset_5703_4_5()) resets the PHY before retrying; a pure
 * pattern mismatch deliberately leaves *resetp untouched.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select this channel's address block and start a
		 * write macro (control 0x0002).
		 */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the channel and kick off the read-back
		 * macros (control 0x0082 then 0x0802).
		 */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Values come back as low/high word pairs; only 15 bits
		 * of the low word and 4 bits of the high word compare.
		 */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: issue the fixup writes to
				 * register 0x000b (magic values; exact
				 * meaning not documented here) and fail
				 * without requesting a PHY reset.
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
2236
2237 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2238 {
2239         int chan;
2240
2241         for (chan = 0; chan < 4; chan++) {
2242                 int i;
2243
2244                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2245                              (chan * 0x2000) | 0x0200);
2246                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2247                 for (i = 0; i < 6; i++)
2248                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2249                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2250                 if (tg3_wait_macro_done(tp))
2251                         return -EBUSY;
2252         }
2253
2254         return 0;
2255 }
2256
/* PHY reset workaround for 5703/5704/5705 (invoked from
 * tg3_phy_reset()): repeatedly reset the PHY and run the DSP channel
 * test pattern until it verifies, then restore transmitter/interrupt
 * and master/slave state.
 *
 * Returns 0 on success or a negative errno.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_CTRL1000,
			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);

		err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
		if (err)
			return err;

		/* Block the PHY control access.  */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		/* On a macro timeout this sets do_phy_reset so the
		 * next iteration starts from a fresh PHY reset.
		 */
		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	/* NOTE(review): if all retries fail, the testpat error in err
	 * is overwritten by the tg3_phy_reset_chanpat() result below
	 * and may be lost if that call succeeds — verify intentional.
	 */
	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	/* Unblock PHY control access (undo the 0x0800 write above). */
	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);

	/* Restore the saved master/slave configuration. */
	tg3_writephy(tp, MII_CTRL1000, phy9_orig);

	/* Re-enable transmitter and interrupt. */
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
2323
2324 /* This will reset the tigon3 PHY if there is no valid
2325  * link unless the FORCE argument is non-zero.
2326  */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	/* 5906: take the internal EPHY out of IDDQ (low-power) before
	 * touching it.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* BMSR is read twice — the usual MII idiom for its latched
	 * link-status bit (presumed intent; first read may be stale).
	 */
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	/* Resetting the PHY drops the link; report it going down now. */
	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	/* 5703/5704/5705 need the test-pattern reset workaround. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	/* 5784 (non-AX): temporarily clear GPHY 10MB-RX-only mode
	 * around the reset, restoring it afterwards.
	 */
	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		/* Restore the saved CPMU control value. */
		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	/* 5784-AX / 5761-AX: move the 1000MB MAC clock off 12.5 MHz. */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	/* 5717+ MII-serdes PHYs skip all the copper workarounds. */
	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	/* Post-reset PHY workarounds keyed off tp->phy_flags bug bits. */
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		/* The same value is written twice deliberately
		 * (preserved from the original sequence).
		 */
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
2464
/* Inter-function GPIO handshake messages.  tg3_set_function_status()
 * stores them in the APE GPIO_MSG register (or TG3_CPMU_DRV_STATUS on
 * non-APE chips); each PCI function owns a 4-bit-spaced slot, hence
 * the << 0/4/8/12 replication in the ALL_* masks.
 */
#define TG3_GPIO_MSG_DRVR_PRES           0x00000001
#define TG3_GPIO_MSG_NEED_VAUX           0x00000002
#define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
                                          TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
        ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
         (TG3_GPIO_MSG_DRVR_PRES << 4) | \
         (TG3_GPIO_MSG_DRVR_PRES << 8) | \
         (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
        ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
         (TG3_GPIO_MSG_NEED_VAUX << 4) | \
         (TG3_GPIO_MSG_NEED_VAUX << 8) | \
         (TG3_GPIO_MSG_NEED_VAUX << 12))
2480
2481 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2482 {
2483         u32 status, shift;
2484
2485         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2486             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2487                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2488         else
2489                 status = tr32(TG3_CPMU_DRV_STATUS);
2490
2491         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2492         status &= ~(TG3_GPIO_MSG_MASK << shift);
2493         status |= (newstat << shift);
2494
2495         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2496             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2497                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2498         else
2499                 tw32(TG3_CPMU_DRV_STATUS, status);
2500
2501         return status >> TG3_APE_GPIO_MSG_SHIFT;
2502 }
2503
2504 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2505 {
2506         if (!tg3_flag(tp, IS_NIC))
2507                 return 0;
2508
2509         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2510             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2511             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2512                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2513                         return -EIO;
2514
2515                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2516
2517                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2518                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2519
2520                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2521         } else {
2522                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2523                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2524         }
2525
2526         return 0;
2527 }
2528
/* tg3_pwrsrc_die_with_vmain - pulse GPIO1 so the board stays on Vmain.
 *
 * Drives GPIO1 high-low-high, waiting TG3_GRC_LCLCTL_PWRSW_DELAY after
 * each write; GPIO1 appears to gate the aux power switch on NIC
 * designs (TODO confirm against board schematics).  No-op for non-NIC
 * boards and for 5700/5701, which are excluded here.
 */
static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
{
	u32 grc_local_ctrl;

	if (!tg3_flag(tp, IS_NIC) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
		return;

	/* Make GPIO1 an output before toggling its level. */
	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);
}
2552
/* tg3_pwrsrc_switch_to_vaux - move a NIC design over to auxiliary power.
 *
 * The power-switch GPIOs are wired differently per chip family, so the
 * GRC_LOCAL_CTRL programming sequence is ASIC-specific.  Every write
 * waits TG3_GRC_LCLCTL_PWRSW_DELAY so the power FETs can settle before
 * the next step.  No-op on non-NIC (LOM) boards.
 */
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* 5700/5701: single write enabling GPIO0-2 outputs with
		 * GPIO0/GPIO1 driven high.
		 */
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		/* Raise GPIO0 after GPIO1/GPIO2 are stable. */
		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
2629
2630 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2631 {
2632         u32 msg = 0;
2633
2634         /* Serialize power state transitions */
2635         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2636                 return;
2637
2638         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2639                 msg = TG3_GPIO_MSG_NEED_VAUX;
2640
2641         msg = tg3_set_function_status(tp, msg);
2642
2643         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2644                 goto done;
2645
2646         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2647                 tg3_pwrsrc_switch_to_vaux(tp);
2648         else
2649                 tg3_pwrsrc_die_with_vmain(tp);
2650
2651 done:
2652         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2653 }
2654
/* tg3_frob_aux_power - pick Vaux or Vmain for this board.
 * @tp:          device state
 * @include_wol: factor Wake-on-LAN enablement into the decision
 *
 * Vaux is required whenever ASF management firmware or (optionally)
 * WoL must stay powered.  On dual-port boards the peer function is
 * consulted: if the peer driver is still fully initialized it owns the
 * power choice, so we do nothing.
 */
static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		/* These chips coordinate via the APE/CPMU handshake. */
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		return;
	}

	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
	    tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);
}
2698
2699 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2700 {
2701         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2702                 return 1;
2703         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2704                 if (speed != SPEED_10)
2705                         return 1;
2706         } else if (speed == SPEED_10)
2707                 return 1;
2708
2709         return 0;
2710 }
2711
2712 static int tg3_setup_phy(struct tg3 *, int);
2713 static int tg3_halt_cpu(struct tg3 *, u32);
2714
/* tg3_power_down_phy - put the PHY into its lowest safe power state.
 * @tp:           device state
 * @do_low_power: also program the aux-control low-power register set
 *
 * Handles the chip-specific cases in order: serdes devices (only 5704
 * needs register work), 5906 (EPHY IDDQ mode), FET-style PHYs (shadow
 * register standby power-down), then the generic copper path.  A few
 * chips must never see BMCR_PDOWN at all; see the bail-out below.
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906: reset the PHY, then park the EPHY in IDDQ. */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			/* Set standby power-down via the shadow regs,
			 * restoring FET_TEST when done.
			 */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		/* Drop the 1000Mb MAC clock to 12.5MHz before powering
		 * the PHY down on these revisions.
		 */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
2787
2788 /* tp->lock is held. */
2789 static int tg3_nvram_lock(struct tg3 *tp)
2790 {
2791         if (tg3_flag(tp, NVRAM)) {
2792                 int i;
2793
2794                 if (tp->nvram_lock_cnt == 0) {
2795                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2796                         for (i = 0; i < 8000; i++) {
2797                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2798                                         break;
2799                                 udelay(20);
2800                         }
2801                         if (i == 8000) {
2802                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2803                                 return -ENODEV;
2804                         }
2805                 }
2806                 tp->nvram_lock_cnt++;
2807         }
2808         return 0;
2809 }
2810
2811 /* tp->lock is held. */
2812 static void tg3_nvram_unlock(struct tg3 *tp)
2813 {
2814         if (tg3_flag(tp, NVRAM)) {
2815                 if (tp->nvram_lock_cnt > 0)
2816                         tp->nvram_lock_cnt--;
2817                 if (tp->nvram_lock_cnt == 0)
2818                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2819         }
2820 }
2821
2822 /* tp->lock is held. */
2823 static void tg3_enable_nvram_access(struct tg3 *tp)
2824 {
2825         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2826                 u32 nvaccess = tr32(NVRAM_ACCESS);
2827
2828                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2829         }
2830 }
2831
2832 /* tp->lock is held. */
2833 static void tg3_disable_nvram_access(struct tg3 *tp)
2834 {
2835         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2836                 u32 nvaccess = tr32(NVRAM_ACCESS);
2837
2838                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2839         }
2840 }
2841
/* tg3_nvram_read_using_eeprom - read one word via the legacy EEPROM
 * interface (used when there is no NVRAM state machine).
 * @tp:     device state
 * @offset: byte offset; must be 32-bit aligned and within the address mask
 * @val:    output word (byteswapped, see below)
 *
 * Returns 0 on success, -EINVAL on a bad offset, or -EBUSY if the
 * EEPROM does not signal completion within ~1 second.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	/* Preserve unrelated control bits; clear the address, device-id
	 * and read fields before programming the new transaction.
	 */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll for completion, 1ms at a time. */
	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
2881
2882 #define NVRAM_CMD_TIMEOUT 10000
2883
2884 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2885 {
2886         int i;
2887
2888         tw32(NVRAM_CMD, nvram_cmd);
2889         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2890                 udelay(10);
2891                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2892                         udelay(10);
2893                         break;
2894                 }
2895         }
2896
2897         if (i == NVRAM_CMD_TIMEOUT)
2898                 return -EBUSY;
2899
2900         return 0;
2901 }
2902
2903 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2904 {
2905         if (tg3_flag(tp, NVRAM) &&
2906             tg3_flag(tp, NVRAM_BUFFERED) &&
2907             tg3_flag(tp, FLASH) &&
2908             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2909             (tp->nvram_jedecnum == JEDEC_ATMEL))
2910
2911                 addr = ((addr / tp->nvram_pagesize) <<
2912                         ATMEL_AT45DB0X1B_PAGE_POS) +
2913                        (addr % tp->nvram_pagesize);
2914
2915         return addr;
2916 }
2917
2918 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2919 {
2920         if (tg3_flag(tp, NVRAM) &&
2921             tg3_flag(tp, NVRAM_BUFFERED) &&
2922             tg3_flag(tp, FLASH) &&
2923             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2924             (tp->nvram_jedecnum == JEDEC_ATMEL))
2925
2926                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2927                         tp->nvram_pagesize) +
2928                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2929
2930         return addr;
2931 }
2932
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 *
 * Returns 0 on success or a negative errno (bad offset, arbitration
 * failure, or command timeout).  Requires tp->lock via the lock/access
 * helpers it calls.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	/* No NVRAM state machine: fall back to the EEPROM interface. */
	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	/* Arbitrate with bootcode before touching the interface. */
	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
2970
2971 /* Ensures NVRAM data is in bytestream format. */
2972 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2973 {
2974         u32 v;
2975         int res = tg3_nvram_read(tp, offset, &v);
2976         if (!res)
2977                 *val = cpu_to_be32(v);
2978         return res;
2979 }
2980
/* On-chip scratch memory windows used when loading firmware into the
 * RX and TX processors.
 */
#define RX_CPU_SCRATCH_BASE     0x30000
#define RX_CPU_SCRATCH_SIZE     0x04000
#define TX_CPU_SCRATCH_BASE     0x34000
#define TX_CPU_SCRATCH_SIZE     0x04000
2985
2986 /* tp->lock is held. */
2987 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
2988 {
2989         int i;
2990
2991         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
2992
2993         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2994                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
2995
2996                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
2997                 return 0;
2998         }
2999         if (offset == RX_CPU_BASE) {
3000                 for (i = 0; i < 10000; i++) {
3001                         tw32(offset + CPU_STATE, 0xffffffff);
3002                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3003                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3004                                 break;
3005                 }
3006
3007                 tw32(offset + CPU_STATE, 0xffffffff);
3008                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
3009                 udelay(10);
3010         } else {
3011                 for (i = 0; i < 10000; i++) {
3012                         tw32(offset + CPU_STATE, 0xffffffff);
3013                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3014                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3015                                 break;
3016                 }
3017         }
3018
3019         if (i >= 10000) {
3020                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3021                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3022                 return -ENODEV;
3023         }
3024
3025         /* Clear firmware's nvram arbitration. */
3026         if (tg3_flag(tp, NVRAM))
3027                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3028         return 0;
3029 }
3030
/* Descriptor for a firmware image to be loaded into an on-chip CPU. */
struct fw_info {
	unsigned int fw_base;		/* load/start address in chip space */
	unsigned int fw_len;		/* image length in bytes */
	const __be32 *fw_data;		/* image words, big-endian */
};
3036
/* tg3_load_firmware_cpu - copy a firmware image into a CPU's scratch RAM.
 * @tp:               device state
 * @cpu_base:         RX_CPU_BASE or TX_CPU_BASE register block
 * @cpu_scratch_base: base of that CPU's scratch memory
 * @cpu_scratch_size: size of the scratch window (zeroed before loading)
 * @info:             image base address, length and data
 *
 * Halts the CPU (taking the NVRAM lock first, since bootcode may still
 * be running), zeroes the scratch window, then writes the image words.
 * Starting the CPU is the caller's job.  Returns 0 on success or a
 * negative errno.  tp->lock is held.
 */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 struct fw_info *info)
{
	int err, lock_err, i;
	void (*write_op)(struct tg3 *, u32, u32);

	/* 5705+ parts have no loadable TX CPU. */
	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	/* 5705+ uses memory-window writes; older parts use indirect regs. */
	if (tg3_flag(tp, 5705_PLUS))
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	/* It is possible that bootcode is still loading at this point.
	 * Get the nvram lock first before halting the cpu.
	 */
	lock_err = tg3_nvram_lock(tp);
	err = tg3_halt_cpu(tp, cpu_base);
	if (!lock_err)
		tg3_nvram_unlock(tp);
	if (err)
		goto out;

	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
		write_op(tp, cpu_scratch_base + i, 0);
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->fw_base & 0xffff) +
			      (i * sizeof(u32))),
			      be32_to_cpu(info->fw_data[i]));

	err = 0;

out:
	return err;
}
3082
/* tg3_load_5701_a0_firmware_fix - load the 5701 A0 workaround firmware
 * into both on-chip CPUs, then start only the RX CPU.
 *
 * Returns 0 on success or a negative errno if loading fails or the RX
 * CPU's program counter cannot be set.  tp->lock is held.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	int err, i;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);

	/* Retry a few times; the CPU may not latch the PC immediately. */
	for (i = 0; i < 5; i++) {
		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
			break;
		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	/* Release the RX CPU from halt. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);

	return 0;
}
3137
/* tg3_load_tso_firmware - load and start the software-TSO firmware.
 *
 * No-op (returns 0) on chips with hardware TSO.  The 5705 runs TSO on
 * the RX CPU out of the mbuf pool SRAM; other chips use the TX CPU and
 * its scratch window.  Returns 0 on success or a negative errno.
 * tp->lock is held.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		return 0;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	cpu_scratch_size = tp->fw_len;
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, info.fw_base);

	/* Retry a few times; the CPU may not latch the PC immediately. */
	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.fw_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	/* Release the CPU from halt. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
	return 0;
}
3201
3202
/* __tg3_set_mac_addr - program dev_addr into the MAC address registers.
 * @tp:         device state
 * @skip_mac_1: leave the MAC_ADDR_1 slot untouched (presumably reserved
 *              for management firmware -- TODO confirm against callers)
 *
 * Writes the address into all four MAC slots (and the twelve extended
 * slots on 5703/5704), then reseeds the TX backoff generator from the
 * sum of the address bytes.  tp->lock is held.
 */
static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
{
	u32 addr_high, addr_low;
	int i;

	addr_high = ((tp->dev->dev_addr[0] << 8) |
		     tp->dev->dev_addr[1]);
	addr_low = ((tp->dev->dev_addr[2] << 24) |
		    (tp->dev->dev_addr[3] << 16) |
		    (tp->dev->dev_addr[4] <<  8) |
		    (tp->dev->dev_addr[5] <<  0));
	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		for (i = 0; i < 12; i++) {
			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
		}
	}

	/* Seed the TX backoff generator from the MAC address. */
	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}
3239
3240 static void tg3_enable_register_access(struct tg3 *tp)
3241 {
3242         /*
3243          * Make sure register accesses (indirect or otherwise) will function
3244          * correctly.
3245          */
3246         pci_write_config_dword(tp->pdev,
3247                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3248 }
3249
3250 static int tg3_power_up(struct tg3 *tp)
3251 {
3252         int err;
3253
3254         tg3_enable_register_access(tp);
3255
3256         err = pci_set_power_state(tp->pdev, PCI_D0);
3257         if (!err) {
3258                 /* Switch out of Vaux if it is a NIC */
3259                 tg3_pwrsrc_switch_to_vmain(tp);
3260         } else {
3261                 netdev_err(tp->dev, "Transition to D0 failed\n");
3262         }
3263
3264         return err;
3265 }
3266
/* Prepare the chip for entry into a low-power state: mask PCI
 * interrupts, save the active link parameters, restrict the PHY
 * advertisement (for Wake-on-LAN), arm the WoL mailbox and MAC,
 * gate the chip clocks, and finally signal shutdown to the firmware.
 * The actual D-state transition is done by the caller (tg3_power_down()).
 * Always returns 0.
 */
static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		u16 lnkctl;

		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				     &lnkctl);
		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
		pci_write_config_word(tp->pdev,
				      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				      lnkctl);
	}

	/* Mask further PCI interrupts while the device goes down. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	/* WoL is armed only if both the PM core and the driver enable it. */
	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     tg3_flag(tp, WOL_ENABLE);

	if (tg3_flag(tp, USE_PHYLIB)) {
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			/* Save the current settings so power-up can
			 * restore them.
			 */
			tp->link_config.orig_speed = phydev->speed;
			tp->link_config.orig_duplex = phydev->duplex;
			tp->link_config.orig_autoneg = phydev->autoneg;
			tp->link_config.orig_advertising = phydev->advertising;

			/* Advertise only what is needed while asleep;
			 * 10/100 full and 100 half are added below only
			 * when ASF or WoL requires them.
			 */
			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
				if (tg3_flag(tp, WOL_SPEED_100MB))
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			/* Certain Broadcom PHY families (by OUI) need the
			 * legacy low-power sequence; BCMAC131 does not.
			 */
			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		/* Non-phylib path always uses the driver's own
		 * low-power handling.
		 */
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
			tp->link_config.orig_speed = tp->link_config.speed;
			tp->link_config.orig_duplex = tp->link_config.duplex;
			tp->link_config.orig_autoneg = tp->link_config.autoneg;
		}

		/* Drop copper links to 10/half for the sleep state. */
		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			tp->link_config.speed = SPEED_10;
			tp->link_config.duplex = DUPLEX_HALF;
			tp->link_config.autoneg = AUTONEG_ENABLE;
			tg3_setup_phy(tp, 0);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		int i;
		u32 val;

		/* Wait up to ~200ms for the firmware mailbox to report
		 * the expected magic before proceeding.
		 */
		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tg3_flag(tp, WOL_CAP))
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			/* Put non-FET PHYs into their WoL low-power mode. */
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		/* Enable magic-packet detection; some chips also need
		 * to keep the whole frame in WoL mode for ASF/APE.
		 */
		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		/* Keep the receiver alive so WoL frames can be seen. */
		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Gate unneeded clocks.  Which clock bits are safe to touch
	 * depends on the ASIC generation.
	 */
	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Two-step clock change, waiting 40us after each write. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* Only power the PHY fully down if nothing needs it awake. */
	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp, true);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			/* Halt the RX CPU with the NVRAM lock held;
			 * only unlock if the lock was acquired.
			 */
			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
3512
/* Put the device to sleep: run the shutdown preparation sequence,
 * arm PME generation if Wake-on-LAN is enabled, then enter D3hot.
 */
static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
3520
3521 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3522 {
3523         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3524         case MII_TG3_AUX_STAT_10HALF:
3525                 *speed = SPEED_10;
3526                 *duplex = DUPLEX_HALF;
3527                 break;
3528
3529         case MII_TG3_AUX_STAT_10FULL:
3530                 *speed = SPEED_10;
3531                 *duplex = DUPLEX_FULL;
3532                 break;
3533
3534         case MII_TG3_AUX_STAT_100HALF:
3535                 *speed = SPEED_100;
3536                 *duplex = DUPLEX_HALF;
3537                 break;
3538
3539         case MII_TG3_AUX_STAT_100FULL:
3540                 *speed = SPEED_100;
3541                 *duplex = DUPLEX_FULL;
3542                 break;
3543
3544         case MII_TG3_AUX_STAT_1000HALF:
3545                 *speed = SPEED_1000;
3546                 *duplex = DUPLEX_HALF;
3547                 break;
3548
3549         case MII_TG3_AUX_STAT_1000FULL:
3550                 *speed = SPEED_1000;
3551                 *duplex = DUPLEX_FULL;
3552                 break;
3553
3554         default:
3555                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3556                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3557                                  SPEED_10;
3558                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3559                                   DUPLEX_HALF;
3560                         break;
3561                 }
3562                 *speed = SPEED_INVALID;
3563                 *duplex = DUPLEX_INVALID;
3564                 break;
3565         }
3566 }
3567
3568 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3569 {
3570         int err = 0;
3571         u32 val, new_adv;
3572
3573         new_adv = ADVERTISE_CSMA;
3574         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
3575         new_adv |= mii_advertise_flowctrl(flowctrl);
3576
3577         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3578         if (err)
3579                 goto done;
3580
3581         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3582                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
3583
3584                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3585                     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3586                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3587
3588                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3589                 if (err)
3590                         goto done;
3591         }
3592
3593         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3594                 goto done;
3595
3596         tw32(TG3_CPMU_EEE_MODE,
3597              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3598
3599         err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3600         if (!err) {
3601                 u32 err2;
3602
3603                 val = 0;
3604                 /* Advertise 100-BaseTX EEE ability */
3605                 if (advertise & ADVERTISED_100baseT_Full)
3606                         val |= MDIO_AN_EEE_ADV_100TX;
3607                 /* Advertise 1000-BaseT EEE ability */
3608                 if (advertise & ADVERTISED_1000baseT_Full)
3609                         val |= MDIO_AN_EEE_ADV_1000T;
3610                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3611                 if (err)
3612                         val = 0;
3613
3614                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3615                 case ASIC_REV_5717:
3616                 case ASIC_REV_57765:
3617                 case ASIC_REV_57766:
3618                 case ASIC_REV_5719:
3619                         /* If we advertised any eee advertisements above... */
3620                         if (val)
3621                                 val = MII_TG3_DSP_TAP26_ALNOKO |
3622                                       MII_TG3_DSP_TAP26_RMRXSTO |
3623                                       MII_TG3_DSP_TAP26_OPCSINPT;
3624                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3625                         /* Fall through */
3626                 case ASIC_REV_5720:
3627                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3628                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3629                                                  MII_TG3_DSP_CH34TP2_HIBW01);
3630                 }
3631
3632                 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3633                 if (!err)
3634                         err = err2;
3635         }
3636
3637 done:
3638         return err;
3639 }
3640
/* (Re)start link bring-up on a copper PHY.  Chooses the advertisement
 * mask from the current power/config state, then either restarts
 * autonegotiation or forces the requested speed/duplex directly in
 * BMCR.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		/* Low-power state: advertise only 10Mb, plus 100Mb when
		 * needed for Wake-on-LAN, with full flow control.
		 */
		new_adv = ADVERTISED_10baseT_Half |
			  ADVERTISED_10baseT_Full;
		if (tg3_flag(tp, WOL_SPEED_100MB))
			new_adv |= ADVERTISED_100baseT_Half |
				   ADVERTISED_100baseT_Full;

		tg3_phy_autoneg_cfg(tp, new_adv,
				    FLOW_CTRL_TX | FLOW_CTRL_RX);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* No forced speed: advertise the configured modes,
		 * minus gigabit on 10/100-only PHYs.
		 */
		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
				    tp->link_config.flowctrl);
	} else {
		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_1000baseT_Full;
			else
				new_adv = ADVERTISED_1000baseT_Half;
		} else if (tp->link_config.speed == SPEED_100) {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_100baseT_Full;
			else
				new_adv = ADVERTISED_100baseT_Half;
		} else {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_10baseT_Full;
			else
				new_adv = ADVERTISED_10baseT_Half;
		}

		tg3_phy_autoneg_cfg(tp, new_adv,
				    tp->link_config.flowctrl);
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		/* Build the forced-mode BMCR value. */
		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		/* Only rewrite BMCR if it actually changes.  Put the PHY
		 * in loopback and wait (up to ~15ms) for the link to go
		 * down before applying the new forced mode.
		 */
		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* BMSR is read twice; presumably to clear
				 * the latched link bit — TODO confirm.
				 */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
3734
3735 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3736 {
3737         int err;
3738
3739         /* Turn off tap power management. */
3740         /* Set Extended packet length bit */
3741         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3742
3743         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3744         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3745         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3746         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3747         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3748
3749         udelay(40);
3750
3751         return err;
3752 }
3753
3754 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
3755 {
3756         u32 advmsk, tgtadv, advertising;
3757
3758         advertising = tp->link_config.advertising;
3759         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
3760
3761         advmsk = ADVERTISE_ALL;
3762         if (tp->link_config.active_duplex == DUPLEX_FULL) {
3763                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
3764                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
3765         }
3766
3767         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3768                 return false;
3769
3770         if ((*lcladv & advmsk) != tgtadv)
3771                 return false;
3772
3773         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3774                 u32 tg3_ctrl;
3775
3776                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
3777
3778                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
3779                         return false;
3780
3781                 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
3782                 if (tg3_ctrl != tgtadv)
3783                         return false;
3784         }
3785
3786         return true;
3787 }
3788
3789 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
3790 {
3791         u32 lpeth = 0;
3792
3793         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3794                 u32 val;
3795
3796                 if (tg3_readphy(tp, MII_STAT1000, &val))
3797                         return false;
3798
3799                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
3800         }
3801
3802         if (tg3_readphy(tp, MII_LPA, rmtadv))
3803                 return false;
3804
3805         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
3806         tp->link_config.rmt_adv = lpeth;
3807
3808         return true;
3809 }
3810
3811 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3812 {
3813         int current_link_up;
3814         u32 bmsr, val;
3815         u32 lcl_adv, rmt_adv;
3816         u16 current_speed;
3817         u8 current_duplex;
3818         int i, err;
3819
3820         tw32(MAC_EVENT, 0);
3821
3822         tw32_f(MAC_STATUS,
3823              (MAC_STATUS_SYNC_CHANGED |
3824               MAC_STATUS_CFG_CHANGED |
3825               MAC_STATUS_MI_COMPLETION |
3826               MAC_STATUS_LNKSTATE_CHANGED));
3827         udelay(40);
3828
3829         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3830                 tw32_f(MAC_MI_MODE,
3831                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3832                 udelay(80);
3833         }
3834
3835         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3836
3837         /* Some third-party PHYs need to be reset on link going
3838          * down.
3839          */
3840         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3841              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3842              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3843             netif_carrier_ok(tp->dev)) {
3844                 tg3_readphy(tp, MII_BMSR, &bmsr);
3845                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3846                     !(bmsr & BMSR_LSTATUS))
3847                         force_reset = 1;
3848         }
3849         if (force_reset)
3850                 tg3_phy_reset(tp);
3851
3852         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3853                 tg3_readphy(tp, MII_BMSR, &bmsr);
3854                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3855                     !tg3_flag(tp, INIT_COMPLETE))
3856                         bmsr = 0;
3857
3858                 if (!(bmsr & BMSR_LSTATUS)) {
3859                         err = tg3_init_5401phy_dsp(tp);
3860                         if (err)
3861                                 return err;
3862
3863                         tg3_readphy(tp, MII_BMSR, &bmsr);
3864                         for (i = 0; i < 1000; i++) {
3865                                 udelay(10);
3866                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3867                                     (bmsr & BMSR_LSTATUS)) {
3868                                         udelay(40);
3869                                         break;
3870                                 }
3871                         }
3872
3873                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3874                             TG3_PHY_REV_BCM5401_B0 &&
3875                             !(bmsr & BMSR_LSTATUS) &&
3876                             tp->link_config.active_speed == SPEED_1000) {
3877                                 err = tg3_phy_reset(tp);
3878                                 if (!err)
3879                                         err = tg3_init_5401phy_dsp(tp);
3880                                 if (err)
3881                                         return err;
3882                         }
3883                 }
3884         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3885                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3886                 /* 5701 {A0,B0} CRC bug workaround */
3887                 tg3_writephy(tp, 0x15, 0x0a75);
3888                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3889                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3890                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3891         }
3892
3893         /* Clear pending interrupts... */
3894         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3895         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3896
3897         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3898                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3899         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3900                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3901
3902         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3903             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3904                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3905                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3906                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3907                 else
3908                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3909         }
3910
3911         current_link_up = 0;
3912         current_speed = SPEED_INVALID;
3913         current_duplex = DUPLEX_INVALID;
3914         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
3915         tp->link_config.rmt_adv = 0;
3916
3917         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3918                 err = tg3_phy_auxctl_read(tp,
3919                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3920                                           &val);
3921                 if (!err && !(val & (1 << 10))) {
3922                         tg3_phy_auxctl_write(tp,
3923                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3924                                              val | (1 << 10));
3925                         goto relink;
3926                 }
3927         }
3928
3929         bmsr = 0;
3930         for (i = 0; i < 100; i++) {
3931                 tg3_readphy(tp, MII_BMSR, &bmsr);
3932                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3933                     (bmsr & BMSR_LSTATUS))
3934                         break;
3935                 udelay(40);
3936         }
3937
3938         if (bmsr & BMSR_LSTATUS) {
3939                 u32 aux_stat, bmcr;
3940
3941                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3942                 for (i = 0; i < 2000; i++) {
3943                         udelay(10);
3944                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3945                             aux_stat)
3946                                 break;
3947                 }
3948
3949                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3950                                              &current_speed,
3951                                              &current_duplex);
3952
3953                 bmcr = 0;
3954                 for (i = 0; i < 200; i++) {
3955                         tg3_readphy(tp, MII_BMCR, &bmcr);
3956                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
3957                                 continue;
3958                         if (bmcr && bmcr != 0x7fff)
3959                                 break;
3960                         udelay(10);
3961                 }
3962
3963                 lcl_adv = 0;
3964                 rmt_adv = 0;
3965
3966                 tp->link_config.active_speed = current_speed;
3967                 tp->link_config.active_duplex = current_duplex;
3968
3969                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3970                         if ((bmcr & BMCR_ANENABLE) &&
3971                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
3972                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
3973                                 current_link_up = 1;
3974                 } else {
3975                         if (!(bmcr & BMCR_ANENABLE) &&
3976                             tp->link_config.speed == current_speed &&
3977                             tp->link_config.duplex == current_duplex &&
3978                             tp->link_config.flowctrl ==
3979                             tp->link_config.active_flowctrl) {
3980                                 current_link_up = 1;
3981                         }
3982                 }
3983
3984                 if (current_link_up == 1 &&
3985                     tp->link_config.active_duplex == DUPLEX_FULL) {
3986                         u32 reg, bit;
3987
3988                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3989                                 reg = MII_TG3_FET_GEN_STAT;
3990                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
3991                         } else {
3992                                 reg = MII_TG3_EXT_STAT;
3993                                 bit = MII_TG3_EXT_STAT_MDIX;
3994                         }
3995
3996                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
3997                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
3998
3999                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4000                 }
4001         }
4002
4003 relink:
4004         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4005                 tg3_phy_copper_begin(tp);
4006
4007                 tg3_readphy(tp, MII_BMSR, &bmsr);
4008                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4009                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4010                         current_link_up = 1;
4011         }
4012
4013         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4014         if (current_link_up == 1) {
4015                 if (tp->link_config.active_speed == SPEED_100 ||
4016                     tp->link_config.active_speed == SPEED_10)
4017                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4018                 else
4019                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4020         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4021                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4022         else
4023                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4024
4025         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4026         if (tp->link_config.active_duplex == DUPLEX_HALF)
4027                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4028
4029         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4030                 if (current_link_up == 1 &&
4031                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4032                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4033                 else
4034                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4035         }
4036
4037         /* ??? Without this setting Netgear GA302T PHY does not
4038          * ??? send/receive packets...
4039          */
4040         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4041             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4042                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4043                 tw32_f(MAC_MI_MODE, tp->mi_mode);
4044                 udelay(80);
4045         }
4046
4047         tw32_f(MAC_MODE, tp->mac_mode);
4048         udelay(40);
4049
4050         tg3_phy_eee_adjust(tp, current_link_up);
4051
4052         if (tg3_flag(tp, USE_LINKCHG_REG)) {
4053                 /* Polled via timer. */
4054                 tw32_f(MAC_EVENT, 0);
4055         } else {
4056                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4057         }
4058         udelay(40);
4059
4060         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4061             current_link_up == 1 &&
4062             tp->link_config.active_speed == SPEED_1000 &&
4063             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4064                 udelay(120);
4065                 tw32_f(MAC_STATUS,
4066                      (MAC_STATUS_SYNC_CHANGED |
4067                       MAC_STATUS_CFG_CHANGED));
4068                 udelay(40);
4069                 tg3_write_mem(tp,
4070                               NIC_SRAM_FIRMWARE_MBOX,
4071                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4072         }
4073
4074         /* Prevent send BD corruption. */
4075         if (tg3_flag(tp, CLKREQ_BUG)) {
4076                 u16 oldlnkctl, newlnkctl;
4077
4078                 pci_read_config_word(tp->pdev,
4079                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4080                                      &oldlnkctl);
4081                 if (tp->link_config.active_speed == SPEED_100 ||
4082                     tp->link_config.active_speed == SPEED_10)
4083                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4084                 else
4085                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4086                 if (newlnkctl != oldlnkctl)
4087                         pci_write_config_word(tp->pdev,
4088                                               pci_pcie_cap(tp->pdev) +
4089                                               PCI_EXP_LNKCTL, newlnkctl);
4090         }
4091
4092         if (current_link_up != netif_carrier_ok(tp->dev)) {
4093                 if (current_link_up)
4094                         netif_carrier_on(tp->dev);
4095                 else
4096                         netif_carrier_off(tp->dev);
4097                 tg3_link_report(tp);
4098         }
4099
4100         return 0;
4101 }
4102
/* Software state for the 1000BASE-X (IEEE 802.3 clause 37) fiber
 * auto-negotiation state machine implemented by
 * tg3_fiber_aneg_smachine().  The MR_* flag names follow the
 * management-register terminology of the standard.
 */
struct tg3_fiber_aneginfo {
	int state;		/* current ANEG_STATE_* below */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

	u32 flags;		/* MR_* control/status bits */
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	/* Measured in state-machine ticks (one call of the smachine). */
	unsigned long link_time, cur_time;

	u32 ability_match_cfg;		/* last rx config word compared */
	int ability_match_count;	/* consecutive identical config words */

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;	/* ANEG_CFG_* bits sent / received */
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes of tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* Settle period, in smachine ticks, before advancing past a state. */
#define ANEG_STATE_SETTLE_TIME  10000
4166
/* Run one tick of the software 1000BASE-X auto-negotiation state
 * machine (IEEE 802.3 clause 37), used when the MAC's hardware
 * autoneg engine is not in use.
 *
 * @tp: device state; hardware is accessed via tr32()/tw32().
 * @ap: negotiation state, carried across ticks by the caller
 *      (see fiber_autoneg(), which ticks this roughly once per us).
 *
 * Returns ANEG_OK while negotiating, ANEG_TIMER_ENAB when the caller
 * must keep ticking through a settle period, ANEG_DONE on completion,
 * or ANEG_FAILED.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	if (ap->state == ANEG_STATE_UNKNOWN) {
		/* First tick: start from a clean slate. */
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	/* Sample the received config word and refresh the ability_match,
	 * ack_match and idle_match predicates the states test below.
	 */
	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				/* Same config word seen more than once
				 * in a row -> a stable advertisement.
				 */
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* No config words being received: link partner is idle. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Transmit an all-zero config word during the restart
		 * settle period.
		 */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Advertise full duplex plus our pause capabilities. */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Echo the partner's ability with the ACK bit set. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				/* Partner's word changed under us; restart. */
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Translate the partner's config word into MR_LP_ADV_*. */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		/* NOTE(review): 0x0008 has no ANEG_CFG_* name here;
		 * presumably the clause 37 toggle bit -- confirm against
		 * the spec before giving it a symbolic name.
		 */
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					/* Next-page exchange unsupported. */
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop sending config words and wait for idle. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
4418
4419 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4420 {
4421         int res = 0;
4422         struct tg3_fiber_aneginfo aninfo;
4423         int status = ANEG_FAILED;
4424         unsigned int tick;
4425         u32 tmp;
4426
4427         tw32_f(MAC_TX_AUTO_NEG, 0);
4428
4429         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4430         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4431         udelay(40);
4432
4433         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4434         udelay(40);
4435
4436         memset(&aninfo, 0, sizeof(aninfo));
4437         aninfo.flags |= MR_AN_ENABLE;
4438         aninfo.state = ANEG_STATE_UNKNOWN;
4439         aninfo.cur_time = 0;
4440         tick = 0;
4441         while (++tick < 195000) {
4442                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4443                 if (status == ANEG_DONE || status == ANEG_FAILED)
4444                         break;
4445
4446                 udelay(1);
4447         }
4448
4449         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4450         tw32_f(MAC_MODE, tp->mac_mode);
4451         udelay(40);
4452
4453         *txflags = aninfo.txconfig;
4454         *rxflags = aninfo.flags;
4455
4456         if (status == ANEG_DONE &&
4457             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4458                              MR_LP_ADV_FULL_DUPLEX)))
4459                 res = 1;
4460
4461         return res;
4462 }
4463
/* Bring up the BCM8002 SerDes PHY.  The register numbers and values
 * written below are undocumented vendor magic (only the inline
 * comments describe intent); the write order and delays are assumed
 * to be required by the hardware -- do not reorder.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
4513
/* Fiber link setup for devices whose MAC has the hardware
 * auto-negotiation engine (the SG_DIG block).
 *
 * @tp:         device state
 * @mac_status: current MAC_STATUS register value, sampled by caller
 *
 * Returns nonzero when the link is considered up.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	/* MAC_SERDES_CFG workaround applies on everything except
	 * 5704 A0/A1 revisions.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: turn hardware autoneg off if it was on. */
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				/* NOTE(review): 0xc010000/0x4010000 are
				 * undocumented per-port magic -- do not
				 * change without hardware to test on.
				 */
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* If we came up via parallel detect and still have PCS
		 * sync without config words, hold the link up while the
		 * serdes counter runs down before reprogramming.
		 */
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		/* Pulse SG_DIG soft reset, then start hardware autoneg. */
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			/* Autoneg finished: resolve flow control from our
			 * advertisement and the partner's.
			 */
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				/* Autoneg timed out; fall back to
				 * parallel detection.
				 */
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No sync and no signal: restart the autoneg timeout. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
4658
/* Fiber link setup for devices without the hardware autoneg engine:
 * either run the software clause 37 state machine (fiber_autoneg())
 * or force the link up, depending on tp->link_config.autoneg.
 *
 * @tp:         device state
 * @mac_status: current MAC_STATUS register value, sampled by caller
 *
 * Returns nonzero when the link is considered up.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* Nothing to do without PCS sync. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			/* Negotiation succeeded: resolve flow control
			 * from what we sent and what the partner sent.
			 */
			u32 local_adv = 0, remote_adv = 0;

			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		/* Ack sync/config-change events until they stop
		 * re-asserting (bounded to 30 attempts).
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Autoneg failed but we have sync and no config words:
		 * treat the link as up anyway (parallel detection).
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
4723
4724 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4725 {
4726         u32 orig_pause_cfg;
4727         u16 orig_active_speed;
4728         u8 orig_active_duplex;
4729         u32 mac_status;
4730         int current_link_up;
4731         int i;
4732
4733         orig_pause_cfg = tp->link_config.active_flowctrl;
4734         orig_active_speed = tp->link_config.active_speed;
4735         orig_active_duplex = tp->link_config.active_duplex;
4736
4737         if (!tg3_flag(tp, HW_AUTONEG) &&
4738             netif_carrier_ok(tp->dev) &&
4739             tg3_flag(tp, INIT_COMPLETE)) {
4740                 mac_status = tr32(MAC_STATUS);
4741                 mac_status &= (MAC_STATUS_PCS_SYNCED |
4742                                MAC_STATUS_SIGNAL_DET |
4743                                MAC_STATUS_CFG_CHANGED |
4744                                MAC_STATUS_RCVD_CFG);
4745                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4746                                    MAC_STATUS_SIGNAL_DET)) {
4747                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4748                                             MAC_STATUS_CFG_CHANGED));
4749                         return 0;
4750                 }
4751         }
4752
4753         tw32_f(MAC_TX_AUTO_NEG, 0);
4754
4755         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4756         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4757         tw32_f(MAC_MODE, tp->mac_mode);
4758         udelay(40);
4759
4760         if (tp->phy_id == TG3_PHY_ID_BCM8002)
4761                 tg3_init_bcm8002(tp);
4762
4763         /* Enable link change event even when serdes polling.  */
4764         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4765         udelay(40);
4766
4767         current_link_up = 0;
4768         tp->link_config.rmt_adv = 0;
4769         mac_status = tr32(MAC_STATUS);
4770
4771         if (tg3_flag(tp, HW_AUTONEG))
4772                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4773         else
4774                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4775
4776         tp->napi[0].hw_status->status =
4777                 (SD_STATUS_UPDATED |
4778                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4779
4780         for (i = 0; i < 100; i++) {
4781                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4782                                     MAC_STATUS_CFG_CHANGED));
4783                 udelay(5);
4784                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4785                                          MAC_STATUS_CFG_CHANGED |
4786                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4787                         break;
4788         }
4789
4790         mac_status = tr32(MAC_STATUS);
4791         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4792                 current_link_up = 0;
4793                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4794                     tp->serdes_counter == 0) {
4795                         tw32_f(MAC_MODE, (tp->mac_mode |
4796                                           MAC_MODE_SEND_CONFIGS));
4797                         udelay(1);
4798                         tw32_f(MAC_MODE, tp->mac_mode);
4799                 }
4800         }
4801
4802         if (current_link_up == 1) {
4803                 tp->link_config.active_speed = SPEED_1000;
4804                 tp->link_config.active_duplex = DUPLEX_FULL;
4805                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4806                                     LED_CTRL_LNKLED_OVERRIDE |
4807                                     LED_CTRL_1000MBPS_ON));
4808         } else {
4809                 tp->link_config.active_speed = SPEED_INVALID;
4810                 tp->link_config.active_duplex = DUPLEX_INVALID;
4811                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4812                                     LED_CTRL_LNKLED_OVERRIDE |
4813                                     LED_CTRL_TRAFFIC_OVERRIDE));
4814         }
4815
4816         if (current_link_up != netif_carrier_ok(tp->dev)) {
4817                 if (current_link_up)
4818                         netif_carrier_on(tp->dev);
4819                 else
4820                         netif_carrier_off(tp->dev);
4821                 tg3_link_report(tp);
4822         } else {
4823                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4824                 if (orig_pause_cfg != now_pause_cfg ||
4825                     orig_active_speed != tp->link_config.active_speed ||
4826                     orig_active_duplex != tp->link_config.active_duplex)
4827                         tg3_link_report(tp);
4828         }
4829
4830         return 0;
4831 }
4832
/* Link setup for MII-interfaced SERDES parts (e.g. 5714S).
 *
 * Puts the MAC in GMII port mode, optionally resets the PHY, then either
 * (re)starts 1000Base-X autonegotiation or forces duplex, and finally
 * resolves link state, speed, duplex and flow control from the
 * BMSR/BMCR/advertisement registers.
 *
 * @force_reset: nonzero to reset the PHY before configuring it.
 * Returns the OR-accumulated tg3_readphy()/tg3_writephy() error status.
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack all pending MAC status-change events before probing the PHY. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;
	tp->link_config.rmt_adv = 0;

	/* BMSR link bit is latched-low; read twice for current state. */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		/* On 5714 the MAC TX status is authoritative for link. */
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, newadv;

		/* Rebuild the 1000Base-X advertisement from the requested
		 * flow-control and advertising settings.
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				 ADVERTISE_1000XPAUSE |
				 ADVERTISE_1000XPSE_ASYM |
				 ADVERTISE_SLCT);

		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);

		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
			/* Advertisement changed (or AN was off): write it,
			 * restart autoneg, and return early -- link is
			 * resolved on a later call once AN has had time
			 * (SERDES_AN_TIMEOUT_5714S ticks) to complete.
			 */
			tg3_writephy(tp, MII_ADVERTISE, newadv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: autoneg off, duplex from link_config. */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Advertise nothing and restart AN so the
				 * partner drops the link before the forced
				 * settings are applied.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Re-sample the (latched) link status. */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			/* Resolve duplex from the advertisement overlap. */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = 0;
			}
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	/* Propagate any carrier transition to the network stack. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
5004
/* Toggle between parallel detection and autonegotiation on SERDES links.
 *
 * While serdes_counter is nonzero, just count it down to give autoneg
 * time to finish.  After that: if carrier is down with AN enabled, probe
 * the PHY shadow/expansion registers -- signal detect present but no
 * config code words means the partner is not autonegotiating, so force
 * 1000/full via parallel detect.  Conversely, if the link came up via
 * parallel detect and config code words appear, re-enable autoneg.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
					 MII_TG3_DSP_EXP1_INT_STAT);
			/* Read twice -- presumably the first read clears a
			 * latched status; TODO confirm against PHY docs.
			 */
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (netif_carrier_ok(tp->dev) &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				 MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

		}
	}
}
5064
/* Top-level link (re)configuration entry point.
 *
 * Dispatches to the fiber, MII-SERDES or copper setup routine based on
 * phy_flags, then applies post-link fixups: 5784_AX clock prescaler,
 * MAC TX length/IPG programming (with a longer slot time for
 * 1000/half), statistics coalescing ticks on pre-5705 parts, and the
 * ASPM L1-threshold workaround.
 *
 * @force_reset: passed through to the PHY-specific setup routine.
 * Returns the error code from that routine.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		u32 scale;

		/* Scale the GRC prescaler to the current MAC clock. */
		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		/* 5720 keeps its jumbo-frame and countdown fields. */
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Only collect hardware stats while the link is up. */
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		/* Relax the PCIe L1 entry threshold while link is down;
		 * saturate it while link is up.
		 */
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
5129
/* Return the current value of tp->irq_sync (nonzero while interrupt
 * synchronization is in progress).
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
5134
5135 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5136 {
5137         int i;
5138
5139         dst = (u32 *)((u8 *)dst + off);
5140         for (i = 0; i < len; i += sizeof(u32))
5141                 *dst++ = tr32(off + i);
5142 }
5143
/* Fill @regs with the register blocks of a legacy (non-PCI-Express)
 * device, window by window via tg3_rd32_loop().  The third argument of
 * each call is the window length in bytes.  Blocks that only exist with
 * MSI-X support, on pre-5705 chips, or with NVRAM are read conditionally.
 */
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
5193
/* Dump device state to the kernel log for debugging.
 *
 * Snapshots the register space (PCIe: a flat read up to the private
 * TL/DL/PL registers; legacy: via tg3_dump_legacy_regs), prints it four
 * words per line skipping all-zero groups, then prints each NAPI
 * vector's hardware status block and software ring indices.
 * Uses GFP_ATOMIC since it may be called from non-sleeping context.
 */
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs) {
		netdev_err(tp->dev, "Failed allocating register dump buffer\n");
		return;
	}

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	/* Print 4 words per line; elide lines that are all zero
	 * (kzalloc guarantees unread slots stay zero).
	 */
	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
5251
5252 /* This is called whenever we suspect that the system chipset is re-
5253  * ordering the sequence of MMIO to the tx send mailbox. The symptom
5254  * is bogus tx completions. We try to recover by setting the
5255  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5256  * in the workqueue.
5257  */
/* Recover from suspected MMIO write reordering on the TX mailbox:
 * warn, then flag TX_RECOVERY_PENDING so the workqueue resets the chip.
 * See the comment above for the full rationale.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* Sanity: the reorder workaround must not already be active. */
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
}
5273
5274 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5275 {
5276         /* Tell compiler to fetch tx indices from memory. */
5277         barrier();
5278         return tnapi->tx_pending -
5279                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5280 }
5281
5282 /* Tigon3 never reports partial packet sends.  So we do not
5283  * need special logic to handle SKBs that have not had all
5284  * of their frags sent yet, like SunGEM does.
5285  */
/* Reap completed TX descriptors for one NAPI vector.
 *
 * Walks the software ring from tx_cons up to the hardware's reported
 * tx_consumer, unmapping the head buffer and every fragment of each
 * completed skb, freeing the skbs, and updating BQL accounting.  Wakes
 * the TX queue if it was stopped and enough descriptors freed up.
 * Calls tg3_tx_recover() and bails out on inconsistent ring state.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	/* With TSS, vector 0 is not a TX vector; queue index shifts by 1. */
	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completed slot with no skb means the hardware and
		 * software views of the ring have diverged.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		/* Skip any extra descriptors used when the head buffer
		 * had to be split (ri->fragmented).
		 */
		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			/* Fragment slots must be empty and must not run
			 * past the hardware consumer index.
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		pkts_compl++;
		bytes_compl += skb->len;

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	/* Byte-queue-limits completion accounting. */
	netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under the TX lock to close the race with a concurrent
	 * tg3_start_xmit() stopping the queue.
	 */
	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
5375
5376 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5377 {
5378         if (!ri->data)
5379                 return;
5380
5381         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5382                          map_sz, PCI_DMA_FROMDEVICE);
5383         kfree(ri->data);
5384         ri->data = NULL;
5385 }
5386
5387 /* Returns size of skb allocated or < 0 on error.
5388  *
5389  * We only need to fill in the address because the other members
5390  * of the RX descriptor are invariant, see tg3_init_rings.
5391  *
5392  * Note the purposeful assymetry of cpu vs. chip accesses.  For
5393  * posting buffers we only dirty the first cache line of the RX
5394  * descriptor (containing the address).  Whereas for the RX status
5395  * buffers the cpu only reads the last cacheline of the RX descriptor
5396  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5397  */
/* Allocate and DMA-map a fresh RX buffer for one producer ring slot.
 * See the block comment above for the descriptor-dirtying rationale.
 *
 * @opaque_key: RXD_OPAQUE_RING_STD or RXD_OPAQUE_RING_JUMBO, selecting
 *              which producer ring (and buffer size) to fill.
 * @dest_idx_unmasked: slot index, masked here with the ring mask.
 * Returns the mapped data size on success, -EINVAL for a bad key,
 * -ENOMEM/-EIO on allocation/mapping failure.  On failure the slot is
 * left completely untouched (callers rely on this).
 */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			    u32 opaque_key, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	/* Size the raw block so a later build_skb-style wrap has room
	 * for the RX offset and the shared_info tail.
	 */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc(skb_size, GFP_ATOMIC);
	if (!data)
		return -ENOMEM;

	/* Map only the packet area, past the RX offset. */
	mapping = pci_map_single(tp->pdev,
				 data + TG3_RX_OFFSET(tp),
				 data_size,
				 PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping)) {
		kfree(data);
		return -EIO;
	}

	/* Commit: record the buffer and publish its DMA address in the
	 * descriptor (only the address fields are ever rewritten).
	 */
	map->data = data;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return data_size;
}
5455
5456 /* We only need to move over in the address because the other
5457  * members of the RX descriptor are invariant.  See notes above
5458  * tg3_alloc_rx_data for full details.
5459  */
/* Recycle an RX buffer: move the buffer pointer and DMA address from a
 * slot of NAPI-0's producring (@spr) into slot @dest_idx_unmasked of
 * @dpr, without allocating or remapping.  See the comment above
 * tg3_alloc_rx_data for why only the address fields are written.
 *
 * @opaque_key: selects the standard or jumbo ring pair.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	/* Source is always NAPI vector 0's producer ring set. */
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->data = NULL;
}
5505
5506 /* The RX ring scheme is composed of multiple rings which post fresh
5507  * buffers to the chip, and one special ring the chip uses to report
5508  * status back to the host.
5509  *
5510  * The special ring reports the status of received packets to the
5511  * host.  The chip does not write into the original descriptor the
5512  * RX buffer was obtained from.  The chip simply takes the original
5513  * descriptor as provided by the host, updates the status and length
5514  * field, then writes this into the next status ring entry.
5515  *
5516  * Each ring the host uses to post buffers to the chip is described
5517  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
5518  * it is first placed into the on-chip ram.  When the packet's length
5519  * is known, it walks down the TG3_BDINFO entries to select the ring.
5520  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
5521  * which is within the range of the new packet's length is chosen.
5522  *
5523  * The "separate ring for rx status" scheme may sound queer, but it makes
5524  * sense from a cache coherency perspective.  If only the host writes
5525  * to the buffer post rings, and only the chip writes to the rx status
5526  * rings, then cache lines never move beyond shared-modified state.
5527  * If both the host and chip were to write into the same ring, cache line
5528  * eviction could occur since both entities want it in an exclusive state.
5529  */
/* Drain the RX return ring for this NAPI vector up to the hardware's
 * consumer index in the status block, passing good frames to the stack
 * and recycling or replenishing producer-ring buffers as it goes.
 *
 * Returns the number of packets passed up, never more than @budget.
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;
		u8 *data;

		/* The opaque cookie identifies the producer ring (std or
		 * jumbo) and the buffer's index within that ring.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;	/* unrecognized ring; skip */

		work_mask |= opaque_key;

		/* Errored frames are dropped; the buffer is recycled back
		 * onto the producer ring rather than re-allocated.
		 */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		prefetch(data + TG3_RX_OFFSET(tp));
		/* Hardware-reported length includes the FCS; strip it. */
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;

			/* Large frame: hand the existing buffer straight to
			 * the stack and post a freshly allocated replacement
			 * on the producer ring.
			 */
			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
						    *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			skb = build_skb(data);
			if (!skb) {
				kfree(data);
				goto drop_it_no_recycle;
			}
			skb_reserve(skb, TG3_RX_OFFSET(tp));
			/* Ensure that the update to the data happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->data = NULL;

		} else {
			/* Small frame: copy it into a new skb and recycle
			 * the original DMA buffer in place.
			 */
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			skb = netdev_alloc_skb(tp->dev,
					       len + TG3_RAW_IP_ALIGN);
			if (skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(skb->data,
			       data + TG3_RX_OFFSET(tp),
			       len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
		}

		skb_put(skb, len);
		/* Trust the hardware checksum only when RXCSUM is enabled
		 * and the chip verified a TCP/UDP checksum of 0xffff.
		 */
		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		/* Drop oversized frames unless they carry a VLAN header
		 * (which accounts for the extra length).
		 */
		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb,
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically tell the chip about newly posted std ring
		 * buffers so it does not starve during long bursts.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		/* In RSS mode, napi[1] posts the gathered buffers back to
		 * the chip (see tg3_poll_work()); kick it if we are not it.
		 */
		if (tnapi != &tp->napi[1])
			napi_schedule(&tp->napi[1].napi);
	}

	return received;
}
5716
/* Check the status block for a link-change event and, if one is
 * pending, ACK it and either clear the MAC status bits (when phylib
 * manages the link) or re-run the driver's PHY setup under tp->lock.
 */
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Clear LINK_CHG while keeping the block marked
			 * updated so interrupt handling still sees it.
			 */
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				/* phylib owns link management; just ACK the
				 * MAC status-change bits in hardware.
				 */
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}
}
5740
/* Transfer recycled rx buffer entries from source producer ring set
 * @spr (a per-vector ring) into destination set @dpr (the ring set
 * actually posted to the chip).  Both the standard and jumbo rings are
 * processed, copying as large a contiguous span as possible per pass.
 *
 * Returns 0 on success, or -ENOSPC if a destination slot was still
 * occupied and the transfer stopped short (the caller retries later).
 */
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		/* Copy only up to the ring wrap point; the outer loop
		 * picks up the remainder on the next pass.
		 */
		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		/* Stop at the first destination slot still holding data. */
		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	/* Same algorithm as above, for the jumbo ring. */
	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
5866
/* Per-vector NAPI work: reap completed TX descriptors, then process up
 * to the remaining budget of RX packets.  In RSS mode, vector 1
 * additionally gathers recycled buffers from every vector's producer
 * rings into napi[0]'s rings and posts the new indices to the chip.
 *
 * Returns the updated work_done count (may equal @budget).
 */
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		/* Snapshot the indices so we only touch the mailboxes
		 * when the transfer below actually moved something.
		 */
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		for (i = 1; i < tp->irq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		/* Order the ring updates before the mailbox writes. */
		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		/* A failed transfer (-ENOSPC) is retried by forcing a
		 * coalescing-now interrupt.
		 */
		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
5913
/* Schedule the chip-reset workqueue task at most once: the atomic
 * test_and_set_bit() guarantees only the first caller queues the work
 * until tg3_reset_task_cancel() (or the task itself) clears the flag.
 */
static inline void tg3_reset_task_schedule(struct tg3 *tp)
{
	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
		schedule_work(&tp->reset_task);
}
5919
/* Synchronously cancel any queued/running reset task, then clear the
 * pending flag so tg3_reset_task_schedule() can queue again.  The flag
 * must be cleared only after cancel_work_sync() returns, otherwise a
 * new schedule could race with the still-running task.
 */
static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
5925
/* NAPI poll handler for MSI-X vectors other than vector 0 (link and
 * error events are handled by tg3_poll() on vector 0).  Loops until the
 * budget is exhausted or no RX/TX work remains, then completes NAPI and
 * re-enables the vector's interrupt through its mailbox.
 */
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
5969
/* Examine error-reporting registers after the status block signalled
 * SD_STATUS_ERROR.  MBUF low-water flow attention and MSI-request bits
 * are tolerated; any other condition dumps chip state and schedules a
 * reset.  Guarded by ERROR_PROCESSED so the reset is scheduled once.
 */
static void tg3_process_error(struct tg3 *tp)
{
	u32 val;
	bool real_error = false;

	if (tg3_flag(tp, ERROR_PROCESSED))
		return;

	/* Check Flow Attention register */
	val = tr32(HOSTCC_FLOW_ATTN);
	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (!real_error)
		return;

	tg3_dump_state(tp);

	tg3_flag_set(tp, ERROR_PROCESSED);
	tg3_reset_task_schedule(tp);
}
6003
/* NAPI poll handler for vector 0 (and the only handler in non-MSI-X
 * modes).  In addition to the shared RX/TX work it checks for chip
 * errors and link changes, then re-enables interrupts once no further
 * work is pending.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
6051
6052 static void tg3_napi_disable(struct tg3 *tp)
6053 {
6054         int i;
6055
6056         for (i = tp->irq_cnt - 1; i >= 0; i--)
6057                 napi_disable(&tp->napi[i].napi);
6058 }
6059
6060 static void tg3_napi_enable(struct tg3 *tp)
6061 {
6062         int i;
6063
6064         for (i = 0; i < tp->irq_cnt; i++)
6065                 napi_enable(&tp->napi[i].napi);
6066 }
6067
6068 static void tg3_napi_init(struct tg3 *tp)
6069 {
6070         int i;
6071
6072         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6073         for (i = 1; i < tp->irq_cnt; i++)
6074                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6075 }
6076
6077 static void tg3_napi_fini(struct tg3 *tp)
6078 {
6079         int i;
6080
6081         for (i = 0; i < tp->irq_cnt; i++)
6082                 netif_napi_del(&tp->napi[i].napi);
6083 }
6084
/* Quiesce the data path: refresh trans_start so the TX watchdog does
 * not fire while the queues are stopped, then shut down NAPI polling
 * and the TX queues.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies; /* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_tx_disable(tp->dev);
}
6091
/* Restart the data path after tg3_netif_stop(): wake the TX queues,
 * re-enable NAPI, force a status-block update so pending work is
 * noticed, and unmask interrupts.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
6104
/* Mark the driver IRQ-synchronized and wait for every vector's
 * in-flight interrupt handler to finish.  The smp_mb() orders the
 * irq_sync store before the synchronize_irq() calls; presumably it
 * pairs with the tg3_irq_sync() checks in the handlers -- confirm.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	/* Quiescing twice without re-enabling is a driver bug. */
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}
6117
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
6129
/* Release the lock taken by tg3_full_lock().  Note it does not undo
 * irq_sync; interrupt re-enabling is handled separately.
 */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
6134
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	/* Warm the caches for the data the poll loop touches first. */
	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	/* Skip scheduling if the driver is quiescing IRQs. */
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}
6152
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
6178
/* Legacy INTx interrupt handler (untagged status mode).  May share the
 * line with other devices, so it must detect and report interrupts
 * that are not ours.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
6227
/* Legacy INTx interrupt handler for tagged-status mode: ownership of
 * the interrupt is determined by comparing the status tag against the
 * last tag acknowledged, rather than by the UPDATED bit.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
6279
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	/* Claim the interrupt if the status block was updated or the
	 * PCI state register says the INT line is asserted; disable
	 * interrupts so the test fires exactly once.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
6294
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: with real interrupts unavailable, invoke the
 * interrupt handler directly for every vector.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif
6305
/* net_device ndo_tx_timeout hook: log state (if TX-error messages are
 * enabled) and schedule a full chip reset to recover.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	tg3_reset_task_schedule(tp);
}
6317
6318 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6319 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6320 {
6321         u32 base = (u32) mapping & 0xffffffff;
6322
6323         return (base > 0xffffdcc0) && (base + len + 8 < base);
6324 }
6325
6326 /* Test for DMA addresses > 40-bit */
6327 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6328                                           int len)
6329 {
6330 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6331         if (tg3_flag(tp, 40BIT_DMA_BUG))
6332                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6333         return 0;
6334 #else
6335         return 0;
6336 #endif
6337 }
6338
6339 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6340                                  dma_addr_t mapping, u32 len, u32 flags,
6341                                  u32 mss, u32 vlan)
6342 {
6343         txbd->addr_hi = ((u64) mapping >> 32);
6344         txbd->addr_lo = ((u64) mapping & 0xffffffff);
6345         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6346         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6347 }
6348
/* Queue one DMA-mapped buffer starting at ring slot *entry, splitting
 * it into multiple descriptors when tp->dma_limit requires it.  Each
 * descriptor consumed decrements *budget and advances *entry.
 *
 * Returns true when a DMA erratum applies to this mapping (short-DMA,
 * 4GB-crossing, >40-bit address) or the budget ran out mid-split; the
 * caller must then fall back to tigon3_dma_hwbug_workaround().
 */
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
                            dma_addr_t map, u32 len, u32 flags,
                            u32 mss, u32 vlan)
{
        struct tg3 *tp = tnapi->tp;
        bool hwbug = false;

        /* Some chips mishandle DMA transfers of 8 bytes or less. */
        if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
                hwbug = true;

        if (tg3_4g_overflow_test(map, len))
                hwbug = true;

        if (tg3_40bit_overflow_test(tp, map, len))
                hwbug = true;

        if (tp->dma_limit) {
                u32 prvidx = *entry;
                /* Intermediate pieces must not carry TXD_FLAG_END. */
                u32 tmp_flag = flags & ~TXD_FLAG_END;
                while (len > tp->dma_limit && *budget) {
                        u32 frag_len = tp->dma_limit;
                        len -= tp->dma_limit;

                        /* Avoid the 8byte DMA problem */
                        if (len <= 8) {
                                len += tp->dma_limit / 2;
                                frag_len = tp->dma_limit / 2;
                        }

                        tnapi->tx_buffers[*entry].fragmented = true;

                        tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
                                      frag_len, tmp_flag, mss, vlan);
                        *budget -= 1;
                        prvidx = *entry;
                        *entry = NEXT_TX(*entry);

                        map += frag_len;
                }

                if (len) {
                        if (*budget) {
                                tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
                                              len, flags, mss, vlan);
                                *budget -= 1;
                                *entry = NEXT_TX(*entry);
                        } else {
                                /* Out of descriptors: the previously queued
                                 * piece becomes the logical tail of this
                                 * buffer, so clear its fragmented marker.
                                 */
                                hwbug = true;
                                tnapi->tx_buffers[prvidx].fragmented = false;
                        }
                }
        } else {
                /* No split limit: a single descriptor suffices. */
                tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
                              len, flags, mss, vlan);
                *entry = NEXT_TX(*entry);
        }

        return hwbug;
}
6408
/* Undo the DMA mappings of one TX skb occupying the ring starting at
 * index entry.  last is the index of the skb's final page fragment to
 * unmap (-1 when only the linear head was mapped).  Extra descriptors
 * produced by tg3_tx_frag_set() splitting are skipped via their
 * fragmented flag.  The skb itself is not freed.
 */
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
        int i;
        struct sk_buff *skb;
        struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

        skb = txb->skb;
        txb->skb = NULL;

        /* Linear portion of the skb. */
        pci_unmap_single(tnapi->tp->pdev,
                         dma_unmap_addr(txb, mapping),
                         skb_headlen(skb),
                         PCI_DMA_TODEVICE);

        /* Skip any split-descriptor continuations of the head. */
        while (txb->fragmented) {
                txb->fragmented = false;
                entry = NEXT_TX(entry);
                txb = &tnapi->tx_buffers[entry];
        }

        for (i = 0; i <= last; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                entry = NEXT_TX(entry);
                txb = &tnapi->tx_buffers[entry];

                pci_unmap_page(tnapi->tp->pdev,
                               dma_unmap_addr(txb, mapping),
                               skb_frag_size(frag), PCI_DMA_TODEVICE);

                /* Skip split-descriptor continuations of this fragment. */
                while (txb->fragmented) {
                        txb->fragmented = false;
                        entry = NEXT_TX(entry);
                        txb = &tnapi->tx_buffers[entry];
                }
        }
}
6446
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Replaces *pskb with a linearized copy (with extra headroom on 5701
 * so the data is 4-byte aligned) and queues it.  The original skb is
 * always freed.  Returns 0 on success, -1 on allocation/mapping/queue
 * failure.  NOTE(review): on mapping or queueing failure *pskb is left
 * pointing at the already-freed copy; callers must not touch it on
 * error (tg3_start_xmit() goes straight to drop_nofree).
 */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
                                       struct sk_buff **pskb,
                                       u32 *entry, u32 *budget,
                                       u32 base_flags, u32 mss, u32 vlan)
{
        struct tg3 *tp = tnapi->tp;
        struct sk_buff *new_skb, *skb = *pskb;
        dma_addr_t new_addr = 0;
        int ret = 0;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
                new_skb = skb_copy(skb, GFP_ATOMIC);
        else {
                /* 5701 needs the TX data 4-byte aligned. */
                int more_headroom = 4 - ((unsigned long)skb->data & 3);

                new_skb = skb_copy_expand(skb,
                                          skb_headroom(skb) + more_headroom,
                                          skb_tailroom(skb), GFP_ATOMIC);
        }

        if (!new_skb) {
                ret = -1;
        } else {
                /* New SKB is guaranteed to be linear. */
                new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
                                          PCI_DMA_TODEVICE);
                /* Make sure the mapping succeeded */
                if (pci_dma_mapping_error(tp->pdev, new_addr)) {
                        dev_kfree_skb(new_skb);
                        ret = -1;
                } else {
                        u32 save_entry = *entry;

                        base_flags |= TXD_FLAG_END;

                        tnapi->tx_buffers[*entry].skb = new_skb;
                        dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
                                           mapping, new_addr);

                        if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
                                            new_skb->len, base_flags,
                                            mss, vlan)) {
                                /* Even the linear copy hit a hardware bug
                                 * or ran out of descriptors; unwind.
                                 */
                                tg3_tx_skb_unmap(tnapi, save_entry, -1);
                                dev_kfree_skb(new_skb);
                                ret = -1;
                        }
                }
        }

        dev_kfree_skb(skb);
        *pskb = new_skb;
        return ret;
}
6501
6502 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6503
/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 *
 * Segments the skb in software and transmits each resulting segment
 * through tg3_start_xmit(); the original skb is always consumed.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
        struct sk_buff *segs, *nskb;
        /* Estimate the number of fragments in the worst case */
        u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

        if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
                netif_stop_queue(tp->dev);

                /* netif_tx_stop_queue() must be done before checking
                 * tx index in tg3_tx_avail() below, because in
                 * tg3_tx(), we update tx index before checking for
                 * netif_tx_queue_stopped().
                 */
                smp_mb();
                if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
                        return NETDEV_TX_BUSY;

                netif_wake_queue(tp->dev);
        }

        /* Turn off hardware TSO for this segmentation pass. */
        segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
        if (IS_ERR(segs))
                goto tg3_tso_bug_end;

        do {
                nskb = segs;
                segs = segs->next;
                nskb->next = NULL;
                tg3_start_xmit(nskb, tp->dev);
        } while (segs);

tg3_tso_bug_end:
        dev_kfree_skb(skb);

        return NETDEV_TX_OK;
}
6544
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
 *
 * Runs with BH disabled under the TX queue lock.  Maps the skb head and
 * page fragments, builds TX BDs via tg3_tx_frag_set(), and falls back
 * to a linearized bounce copy when a DMA erratum is hit.  Returns
 * NETDEV_TX_OK (also on drop) or NETDEV_TX_BUSY when the ring is
 * unexpectedly full.
 */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        u32 len, entry, base_flags, mss, vlan = 0;
        u32 budget;
        int i = -1, would_hit_hwbug;
        dma_addr_t mapping;
        struct tg3_napi *tnapi;
        struct netdev_queue *txq;
        unsigned int last;

        txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
        tnapi = &tp->napi[skb_get_queue_mapping(skb)];
        /* With TSS enabled, TX vectors start at napi[1]. */
        if (tg3_flag(tp, ENABLE_TSS))
                tnapi++;

        budget = tg3_tx_avail(tnapi);

        /* We are running in BH disabled context with netif_tx_lock
         * and TX reclaim runs via tp->napi.poll inside of a software
         * interrupt.  Furthermore, IRQ processing runs lockless so we have
         * no IRQ context deadlocks to worry about either.  Rejoice!
         */
        if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
                if (!netif_tx_queue_stopped(txq)) {
                        netif_tx_stop_queue(txq);

                        /* This is a hard error, log it. */
                        netdev_err(dev,
                                   "BUG! Tx Ring full when queue awake!\n");
                }
                return NETDEV_TX_BUSY;
        }

        entry = tnapi->tx_prod;
        base_flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;

        mss = skb_shinfo(skb)->gso_size;
        if (mss) {
                struct iphdr *iph;
                u32 tcp_opt_len, hdr_len;

                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
                        goto drop;

                iph = ip_hdr(skb);
                tcp_opt_len = tcp_optlen(skb);

                hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;

                /* For IPv4 TSO, preload tot_len with the per-segment size. */
                if (!skb_is_gso_v6(skb)) {
                        iph->check = 0;
                        iph->tot_len = htons(mss + hdr_len);
                }

                if (unlikely((ETH_HLEN + hdr_len) > 80) &&
                    tg3_flag(tp, TSO_BUG))
                        return tg3_tso_bug(tp, skb);

                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);

                if (tg3_flag(tp, HW_TSO_1) ||
                    tg3_flag(tp, HW_TSO_2) ||
                    tg3_flag(tp, HW_TSO_3)) {
                        tcp_hdr(skb)->check = 0;
                        base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
                } else
                        /* Firmware TSO wants a pseudo-header checksum seed. */
                        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
                                                                 iph->daddr, 0,
                                                                 IPPROTO_TCP,
                                                                 0);

                /* Encode header length into mss/base_flags in the
                 * chip-generation-specific format.
                 */
                if (tg3_flag(tp, HW_TSO_3)) {
                        mss |= (hdr_len & 0xc) << 12;
                        if (hdr_len & 0x10)
                                base_flags |= 0x00000010;
                        base_flags |= (hdr_len & 0x3e0) << 5;
                } else if (tg3_flag(tp, HW_TSO_2))
                        mss |= hdr_len << 9;
                else if (tg3_flag(tp, HW_TSO_1) ||
                         GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                        if (tcp_opt_len || iph->ihl > 5) {
                                int tsflags;

                                tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
                                mss |= (tsflags << 11);
                        }
                } else {
                        if (tcp_opt_len || iph->ihl > 5) {
                                int tsflags;

                                tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
                                base_flags |= tsflags << 12;
                        }
                }
        }

        if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
            !mss && skb->len > VLAN_ETH_FRAME_LEN)
                base_flags |= TXD_FLAG_JMB_PKT;

        if (vlan_tx_tag_present(skb)) {
                base_flags |= TXD_FLAG_VLAN;
                vlan = vlan_tx_tag_get(skb);
        }

        len = skb_headlen(skb);

        mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
        if (pci_dma_mapping_error(tp->pdev, mapping))
                goto drop;


        tnapi->tx_buffers[entry].skb = skb;
        dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

        would_hit_hwbug = 0;

        if (tg3_flag(tp, 5701_DMA_BUG))
                would_hit_hwbug = 1;

        if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
                          ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
                            mss, vlan)) {
                would_hit_hwbug = 1;
        /* Now loop through additional data fragments, and queue them. */
        } else if (skb_shinfo(skb)->nr_frags > 0) {
                u32 tmp_mss = mss;

                /* Only hardware TSO carries mss on every descriptor. */
                if (!tg3_flag(tp, HW_TSO_1) &&
                    !tg3_flag(tp, HW_TSO_2) &&
                    !tg3_flag(tp, HW_TSO_3))
                        tmp_mss = 0;

                last = skb_shinfo(skb)->nr_frags - 1;
                for (i = 0; i <= last; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        len = skb_frag_size(frag);
                        mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
                                                   len, DMA_TO_DEVICE);

                        tnapi->tx_buffers[entry].skb = NULL;
                        dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
                                           mapping);
                        if (dma_mapping_error(&tp->pdev->dev, mapping))
                                goto dma_error;

                        if (!budget ||
                            tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
                                            len, base_flags |
                                            ((i == last) ? TXD_FLAG_END : 0),
                                            tmp_mss, vlan)) {
                                would_hit_hwbug = 1;
                                break;
                        }
                }
        }

        if (would_hit_hwbug) {
                tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

                /* If the workaround fails due to memory/mapping
                 * failure, silently drop this packet.
                 */
                entry = tnapi->tx_prod;
                budget = tg3_tx_avail(tnapi);
                if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
                                                base_flags, mss, vlan))
                        goto drop_nofree;
        }

        skb_tx_timestamp(skb);
        netdev_sent_queue(tp->dev, skb->len);

        /* Packets are ready, update Tx producer idx local and on card. */
        tw32_tx_mbox(tnapi->prodmbox, entry);

        tnapi->tx_prod = entry;
        if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
                netif_tx_stop_queue(txq);

                /* netif_tx_stop_queue() must be done before checking
                 * tx index in tg3_tx_avail() below, because in
                 * tg3_tx(), we update tx index before checking for
                 * netif_tx_queue_stopped().
                 */
                smp_mb();
                if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
                        netif_tx_wake_queue(txq);
        }

        mmiowb();
        return NETDEV_TX_OK;

dma_error:
        /* Unwind the head and fragments 0..i-1; fragment i failed to map. */
        tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
        tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
        dev_kfree_skb(skb);
drop_nofree:
        tp->tx_dropped++;
        return NETDEV_TX_OK;
}
6756
6757 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
6758 {
6759         if (enable) {
6760                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
6761                                   MAC_MODE_PORT_MODE_MASK);
6762
6763                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6764
6765                 if (!tg3_flag(tp, 5705_PLUS))
6766                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6767
6768                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
6769                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
6770                 else
6771                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
6772         } else {
6773                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6774
6775                 if (tg3_flag(tp, 5705_PLUS) ||
6776                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
6777                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
6778                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
6779         }
6780
6781         tw32(MAC_MODE, tp->mac_mode);
6782         udelay(40);
6783 }
6784
/* Put the PHY into loopback at the given speed for self-test.  extlpbk
 * selects external (cable) loopback instead of internal BMCR loopback.
 * Returns 0 on success or -EIO if external loopback setup fails.
 */
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
        u32 val, bmcr, mac_mode, ptest = 0;

        tg3_phy_toggle_apd(tp, false);
        tg3_phy_toggle_automdix(tp, 0);

        if (extlpbk && tg3_phy_set_extloopbk(tp))
                return -EIO;

        bmcr = BMCR_FULLDPLX;
        switch (speed) {
        case SPEED_10:
                break;
        case SPEED_100:
                bmcr |= BMCR_SPEED100;
                break;
        case SPEED_1000:
        default:
                /* FET PHYs are limited to 100Mb; everything else runs
                 * the gigabit test.
                 */
                if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                        speed = SPEED_100;
                        bmcr |= BMCR_SPEED100;
                } else {
                        speed = SPEED_1000;
                        bmcr |= BMCR_SPEED1000;
                }
        }

        if (extlpbk) {
                if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
                        /* Force the PHY into the master role. */
                        tg3_readphy(tp, MII_CTRL1000, &val);
                        val |= CTL1000_AS_MASTER |
                               CTL1000_ENABLE_MASTER;
                        tg3_writephy(tp, MII_CTRL1000, val);
                } else {
                        ptest = MII_TG3_FET_PTEST_TRIM_SEL |
                                MII_TG3_FET_PTEST_TRIM_2;
                        tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
                }
        } else
                bmcr |= BMCR_LOOPBACK;

        tg3_writephy(tp, MII_BMCR, bmcr);

        /* The write needs to be flushed for the FETs */
        if (tp->phy_flags & TG3_PHYFLG_IS_FET)
                tg3_readphy(tp, MII_BMCR, &bmcr);

        udelay(40);

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
                tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
                             MII_TG3_FET_PTEST_FRC_TX_LINK |
                             MII_TG3_FET_PTEST_FRC_TX_LOCK);

                /* The write needs to be flushed for the AC131 */
                tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
        }

        /* Reset to prevent losing 1st rx packet intermittently */
        if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
            tg3_flag(tp, 5780_CLASS)) {
                tw32_f(MAC_RX_MODE, RX_MODE_RESET);
                udelay(10);
                tw32_f(MAC_RX_MODE, tp->rx_mode);
        }

        mac_mode = tp->mac_mode &
                   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
        if (speed == SPEED_1000)
                mac_mode |= MAC_MODE_PORT_MODE_GMII;
        else
                mac_mode |= MAC_MODE_PORT_MODE_MII;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
                u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

                /* 5401 and 5411 PHYs need opposite polarity settings. */
                if (masked_phy_id == TG3_PHY_ID_BCM5401)
                        mac_mode &= ~MAC_MODE_LINK_POLARITY;
                else if (masked_phy_id == TG3_PHY_ID_BCM5411)
                        mac_mode |= MAC_MODE_LINK_POLARITY;

                tg3_writephy(tp, MII_TG3_EXT_CTRL,
                             MII_TG3_EXT_CTRL_LNK3_LED_MODE);
        }

        tw32(MAC_MODE, mac_mode);
        udelay(40);

        return 0;
}
6877
6878 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
6879 {
6880         struct tg3 *tp = netdev_priv(dev);
6881
6882         if (features & NETIF_F_LOOPBACK) {
6883                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6884                         return;
6885
6886                 spin_lock_bh(&tp->lock);
6887                 tg3_mac_loopback(tp, true);
6888                 netif_carrier_on(tp->dev);
6889                 spin_unlock_bh(&tp->lock);
6890                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6891         } else {
6892                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6893                         return;
6894
6895                 spin_lock_bh(&tp->lock);
6896                 tg3_mac_loopback(tp, false);
6897                 /* Force link status check */
6898                 tg3_setup_phy(tp, 1);
6899                 spin_unlock_bh(&tp->lock);
6900                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6901         }
6902 }
6903
6904 static netdev_features_t tg3_fix_features(struct net_device *dev,
6905         netdev_features_t features)
6906 {
6907         struct tg3 *tp = netdev_priv(dev);
6908
6909         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6910                 features &= ~NETIF_F_ALL_TSO;
6911
6912         return features;
6913 }
6914
6915 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
6916 {
6917         netdev_features_t changed = dev->features ^ features;
6918
6919         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6920                 tg3_set_loopback(dev, features);
6921
6922         return 0;
6923 }
6924
6925 static void tg3_rx_prodring_free(struct tg3 *tp,
6926                                  struct tg3_rx_prodring_set *tpr)
6927 {
6928         int i;
6929
6930         if (tpr != &tp->napi[0].prodring) {
6931                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6932                      i = (i + 1) & tp->rx_std_ring_mask)
6933                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
6934                                         tp->rx_pkt_map_sz);
6935
6936                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
6937                         for (i = tpr->rx_jmb_cons_idx;
6938                              i != tpr->rx_jmb_prod_idx;
6939                              i = (i + 1) & tp->rx_jmb_ring_mask) {
6940                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
6941                                                 TG3_RX_JMB_MAP_SZ);
6942                         }
6943                 }
6944
6945                 return;
6946         }
6947
6948         for (i = 0; i <= tp->rx_std_ring_mask; i++)
6949                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
6950                                 tp->rx_pkt_map_sz);
6951
6952         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6953                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6954                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
6955                                         TG3_RX_JMB_MAP_SZ);
6956         }
6957 }
6958
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success, or -ENOMEM when not even one buffer could be
 * allocated.  Partial allocations merely shrink tp->rx_pending /
 * tp->rx_jumbo_pending with a warning.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
                                 struct tg3_rx_prodring_set *tpr)
{
        u32 i, rx_pkt_dma_sz;

        tpr->rx_std_cons_idx = 0;
        tpr->rx_std_prod_idx = 0;
        tpr->rx_jmb_cons_idx = 0;
        tpr->rx_jmb_prod_idx = 0;

        /* Non-default (per-vector) rings only need their shadow buffer
         * arrays cleared; descriptor setup below is for napi[0] only.
         */
        if (tpr != &tp->napi[0].prodring) {
                memset(&tpr->rx_std_buffers[0], 0,
                       TG3_RX_STD_BUFF_RING_SIZE(tp));
                if (tpr->rx_jmb_buffers)
                        memset(&tpr->rx_jmb_buffers[0], 0,
                               TG3_RX_JMB_BUFF_RING_SIZE(tp));
                goto done;
        }

        /* Zero out all descriptors. */
        memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

        rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
        /* 5780-class chips carry jumbo-sized buffers on the std ring. */
        if (tg3_flag(tp, 5780_CLASS) &&
            tp->dev->mtu > ETH_DATA_LEN)
                rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
        tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

        /* Initialize invariants of the rings, we only set this
         * stuff once.  This works because the card does not
         * write into the rx buffer posting rings.
         */
        for (i = 0; i <= tp->rx_std_ring_mask; i++) {
                struct tg3_rx_buffer_desc *rxd;

                rxd = &tpr->rx_std[i];
                rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
                rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
                rxd->opaque = (RXD_OPAQUE_RING_STD |
                               (i << RXD_OPAQUE_INDEX_SHIFT));
        }

        /* Now allocate fresh SKBs for each rx ring. */
        for (i = 0; i < tp->rx_pending; i++) {
                if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
                        netdev_warn(tp->dev,
                                    "Using a smaller RX standard ring. Only "
                                    "%d out of %d buffers were allocated "
                                    "successfully\n", i, tp->rx_pending);
                        if (i == 0)
                                goto initfail;
                        tp->rx_pending = i;
                        break;
                }
        }

        if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
                goto done;

        memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

        if (!tg3_flag(tp, JUMBO_RING_ENABLE))
                goto done;

        for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
                struct tg3_rx_buffer_desc *rxd;

                rxd = &tpr->rx_jmb[i].std;
                rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
                rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
                                  RXD_FLAG_JUMBO;
                rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
                       (i << RXD_OPAQUE_INDEX_SHIFT));
        }

        for (i = 0; i < tp->rx_jumbo_pending; i++) {
                if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
                        netdev_warn(tp->dev,
                                    "Using a smaller RX jumbo ring. Only %d "
                                    "out of %d buffers were allocated "
                                    "successfully\n", i, tp->rx_jumbo_pending);
                        if (i == 0)
                                goto initfail;
                        tp->rx_jumbo_pending = i;
                        break;
                }
        }

done:
        return 0;

initfail:
        tg3_rx_prodring_free(tp, tpr);
        return -ENOMEM;
}
7061
7062 static void tg3_rx_prodring_fini(struct tg3 *tp,
7063                                  struct tg3_rx_prodring_set *tpr)
7064 {
7065         kfree(tpr->rx_std_buffers);
7066         tpr->rx_std_buffers = NULL;
7067         kfree(tpr->rx_jmb_buffers);
7068         tpr->rx_jmb_buffers = NULL;
7069         if (tpr->rx_std) {
7070                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7071                                   tpr->rx_std, tpr->rx_std_mapping);
7072                 tpr->rx_std = NULL;
7073         }
7074         if (tpr->rx_jmb) {
7075                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7076                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
7077                 tpr->rx_jmb = NULL;
7078         }
7079 }
7080
7081 static int tg3_rx_prodring_init(struct tg3 *tp,
7082                                 struct tg3_rx_prodring_set *tpr)
7083 {
7084         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7085                                       GFP_KERNEL);
7086         if (!tpr->rx_std_buffers)
7087                 return -ENOMEM;
7088
7089         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7090                                          TG3_RX_STD_RING_BYTES(tp),
7091                                          &tpr->rx_std_mapping,
7092                                          GFP_KERNEL);
7093         if (!tpr->rx_std)
7094                 goto err_out;
7095
7096         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7097                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7098                                               GFP_KERNEL);
7099                 if (!tpr->rx_jmb_buffers)
7100                         goto err_out;
7101
7102                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7103                                                  TG3_RX_JMB_RING_BYTES(tp),
7104                                                  &tpr->rx_jmb_mapping,
7105                                                  GFP_KERNEL);
7106                 if (!tpr->rx_jmb)
7107                         goto err_out;
7108         }
7109
7110         return 0;
7111
7112 err_out:
7113         tg3_rx_prodring_fini(tp, tpr);
7114         return -ENOMEM;
7115 }
7116
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	/* Walk every interrupt vector's ring set. */
	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		/* Vectors without a tx buffer array have no tx work to
		 * clean up (see tg3_alloc_consistent's TSS handling).
		 */
		if (!tnapi->tx_buffers)
			continue;

		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
			struct sk_buff *skb = tnapi->tx_buffers[i].skb;

			if (!skb)
				continue;

			/* Unmap this skb's descriptors (last index is
			 * nr_frags - 1) before releasing the skb.
			 */
			tg3_tx_skb_unmap(tnapi, i,
					 skb_shinfo(skb)->nr_frags - 1);

			dev_kfree_skb_any(skb);
		}
	}
	/* Reset the byte-queue-limit accounting for the device. */
	netdev_reset_queue(tp->dev);
}
7150
7151 /* Initialize tx/rx rings for packet processing.
7152  *
7153  * The chip has been shut down and the driver detached from
7154  * the networking, so no interrupts or new tx packets will
7155  * end up in the driver.  tp->{tx,}lock are held and thus
7156  * we may not sleep.
7157  */
7158 static int tg3_init_rings(struct tg3 *tp)
7159 {
7160         int i;
7161
7162         /* Free up all the SKBs. */
7163         tg3_free_rings(tp);
7164
7165         for (i = 0; i < tp->irq_cnt; i++) {
7166                 struct tg3_napi *tnapi = &tp->napi[i];
7167
7168                 tnapi->last_tag = 0;
7169                 tnapi->last_irq_tag = 0;
7170                 tnapi->hw_status->status = 0;
7171                 tnapi->hw_status->status_tag = 0;
7172                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7173
7174                 tnapi->tx_prod = 0;
7175                 tnapi->tx_cons = 0;
7176                 if (tnapi->tx_ring)
7177                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7178
7179                 tnapi->rx_rcb_ptr = 0;
7180                 if (tnapi->rx_rcb)
7181                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7182
7183                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7184                         tg3_free_rings(tp);
7185                         return -ENOMEM;
7186                 }
7187         }
7188
7189         return 0;
7190 }
7191
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 *
 * Releases everything tg3_alloc_consistent() set up: per-vector tx
 * rings and buffer arrays, rx return rings, producer ring sets and
 * status blocks, and finally the shared hardware statistics block.
 * Safe on partially allocated state (every branch is NULL-guarded).
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->tx_ring) {
			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
				tnapi->tx_ring, tnapi->tx_desc_mapping);
			tnapi->tx_ring = NULL;
		}

		kfree(tnapi->tx_buffers);
		tnapi->tx_buffers = NULL;

		if (tnapi->rx_rcb) {
			dma_free_coherent(&tp->pdev->dev,
					  TG3_RX_RCB_RING_BYTES(tp),
					  tnapi->rx_rcb,
					  tnapi->rx_rcb_mapping);
			tnapi->rx_rcb = NULL;
		}

		tg3_rx_prodring_fini(tp, &tnapi->prodring);

		if (tnapi->hw_status) {
			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
					  tnapi->hw_status,
					  tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
	}

	/* The statistics block is shared across vectors; free it last. */
	if (tp->hw_stats) {
		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
				  tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
7236
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 *
 * Allocates all DMA-coherent state the driver shares with the chip:
 * the hardware statistics block, and per interrupt vector a status
 * block, rx producer ring set, and (depending on the TSS/RSS flags)
 * a tx ring and an rx return ring.  Returns 0 or -ENOMEM; on failure
 * all partial allocations are unwound via tg3_free_consistent().
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	/* Shared hardware statistics block, DMA-visible to the chip. */
	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
					  sizeof(struct tg3_hw_stats),
					  &tp->stats_mapping,
					  GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		/* Per-vector status block written by the chip. */
		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
						      TG3_HW_STATUS_SIZE,
						      &tnapi->status_mapping,
						      GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
		sblk = tnapi->hw_status;

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector TSS is enabled, vector 0 does not handle
		 * tx interrupts.  Don't allocate any resources for it.
		 */
		if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
		    (i && tg3_flag(tp, ENABLE_TSS))) {
			tnapi->tx_buffers = kzalloc(
					       sizeof(struct tg3_tx_ring_info) *
					       TG3_TX_RING_SIZE, GFP_KERNEL);
			if (!tnapi->tx_buffers)
				goto err_out;

			tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
							    TG3_TX_RING_BYTES,
							&tnapi->tx_desc_mapping,
							    GFP_KERNEL);
			if (!tnapi->tx_ring)
				goto err_out;
		}

		/*
		 * When RSS is enabled, the status block format changes
		 * slightly.  The "rx_jumbo_consumer", "reserved",
		 * and "rx_mini_consumer" members get mapped to the
		 * other three rx return ring producer indexes.
		 */
		switch (i) {
		default:
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
			break;
		case 2:
			tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
			break;
		case 3:
			tnapi->rx_rcb_prod_idx = &sblk->reserved;
			break;
		case 4:
			tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
			break;
		}

		/*
		 * If multivector RSS is enabled, vector 0 does not handle
		 * rx or tx interrupts.  Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		/* Rx return (completion) ring for this vector. */
		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
						   TG3_RX_RCB_RING_BYTES(tp),
						   &tnapi->rx_rcb_mapping,
						   GFP_KERNEL);
		if (!tnapi->rx_rcb)
			goto err_out;

		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	}

	return 0;

err_out:
	/* Unwind whatever was allocated so far. */
	tg3_free_consistent(tp);
	return -ENOMEM;
}
7334
7335 #define MAX_WAIT_CNT 1000
7336
7337 /* To stop a block, clear the enable bit and poll till it
7338  * clears.  tp->lock is held.
7339  */
7340 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7341 {
7342         unsigned int i;
7343         u32 val;
7344
7345         if (tg3_flag(tp, 5705_PLUS)) {
7346                 switch (ofs) {
7347                 case RCVLSC_MODE:
7348                 case DMAC_MODE:
7349                 case MBFREE_MODE:
7350                 case BUFMGR_MODE:
7351                 case MEMARB_MODE:
7352                         /* We can't enable/disable these bits of the
7353                          * 5705/5750, just say success.
7354                          */
7355                         return 0;
7356
7357                 default:
7358                         break;
7359                 }
7360         }
7361
7362         val = tr32(ofs);
7363         val &= ~enable_bit;
7364         tw32_f(ofs, val);
7365
7366         for (i = 0; i < MAX_WAIT_CNT; i++) {
7367                 udelay(100);
7368                 val = tr32(ofs);
7369                 if ((val & enable_bit) == 0)
7370                         break;
7371         }
7372
7373         if (i == MAX_WAIT_CNT && !silent) {
7374                 dev_err(&tp->pdev->dev,
7375                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7376                         ofs, enable_bit);
7377                 return -ENODEV;
7378         }
7379
7380         return 0;
7381 }
7382
/* tp->lock is held.
 *
 * Quiesce the chip: disable interrupts, stop the rx MAC, then shut
 * down the receive, send, DMA, host-coalescing and buffer-manager
 * blocks in order.  Individual tg3_stop_block() errors are OR-ed into
 * the return value so the full shutdown sequence always runs.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop the receive MAC first so no new frames enter the chip. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Receive-path blocks. */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* Send-path and DMA blocks. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	/* Disable the MAC transmitter. */
	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	/* Poll up to MAX_WAIT_CNT * 100 us for tx mode to shut off. */
	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	/* Host coalescing, write DMA and free-list blocks. */
	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse the flow-through queue reset. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Wipe the per-vector status blocks now that the chip is idle. */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	return err;
}
7446
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	/* The core-clock reset in tg3_chip_reset() can clear bits in
	 * PCI_COMMAND (see the comment there); keep a copy so
	 * tg3_restore_pci_state() can write it back.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
7452
/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Restore the command word saved by tg3_save_pci_state(). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	/* Conventional PCI: restore cache line size and latency timer. */
	if (!tg3_flag(tp, PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
7513
/* tp->lock is held.
 *
 * Perform a full GRC core-clock reset of the chip, then bring the PCI
 * configuration, memory arbiter, MAC mode and firmware handshake state
 * back to a usable baseline.  Returns 0 on success or the error from
 * tg3_poll_fw().
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	smp_mb();

	/* Wait for any running irq handlers to observe the flag above. */
	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	/* This write triggers the actual core-clock reset. */
	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
		u16 val16;

		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (i = 0; i < 5000; i++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
				     &val16);
		val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
			   PCI_EXP_DEVCTL_NOSNOOP_EN);
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
		pci_write_config_word(tp->pdev,
				      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
				      val16);

		/* Clear error status */
		pci_write_config_word(tp->pdev,
				      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
				      PCI_EXP_DEVSTA_CED |
				      PCI_EXP_DEVSTA_NFED |
				      PCI_EXP_DEVSTA_FED |
				      PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	/* Re-enable the memory arbiter (preserving other mode bits on
	 * 5780-class chips).
	 */
	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	/* NOTE(review): 0xc4 / bit 15 below is an undocumented chip
	 * workaround register for 5705 A0 — confirm against Broadcom
	 * errata before touching.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Reprogram the MAC port mode according to the PHY type. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	/* Wait for the bootcode/firmware to finish coming back up. */
	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tg3_mdio_start(tp);

	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
	}

	/* Reprobe ASF enable state.  */
	tg3_flag_clear(tp, ENABLE_ASF);
	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}
	}

	return 0;
}
7757
7758 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
7759                                                  struct rtnl_link_stats64 *);
7760 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *,
7761                                                 struct tg3_ethtool_stats *);
7762
7763 /* tp->lock is held. */
7764 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7765 {
7766         int err;
7767
7768         tg3_stop_fw(tp);
7769
7770         tg3_write_sig_pre_reset(tp, kind);
7771
7772         tg3_abort_hw(tp, silent);
7773         err = tg3_chip_reset(tp);
7774
7775         __tg3_set_mac_addr(tp, 0);
7776
7777         tg3_write_sig_legacy(tp, kind);
7778         tg3_write_sig_post_reset(tp, kind);
7779
7780         if (tp->hw_stats) {
7781                 /* Save the stats across chip resets... */
7782                 tg3_get_stats64(tp->dev, &tp->net_stats_prev),
7783                 tg3_get_estats(tp, &tp->estats_prev);
7784
7785                 /* And make sure the next sample is new data */
7786                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7787         }
7788
7789         if (err)
7790                 return err;
7791
7792         return 0;
7793 }
7794
7795 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7796 {
7797         struct tg3 *tp = netdev_priv(dev);
7798         struct sockaddr *addr = p;
7799         int err = 0, skip_mac_1 = 0;
7800
7801         if (!is_valid_ether_addr(addr->sa_data))
7802                 return -EINVAL;
7803
7804         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7805
7806         if (!netif_running(dev))
7807                 return 0;
7808
7809         if (tg3_flag(tp, ENABLE_ASF)) {
7810                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7811
7812                 addr0_high = tr32(MAC_ADDR_0_HIGH);
7813                 addr0_low = tr32(MAC_ADDR_0_LOW);
7814                 addr1_high = tr32(MAC_ADDR_1_HIGH);
7815                 addr1_low = tr32(MAC_ADDR_1_LOW);
7816
7817                 /* Skip MAC addr 1 if ASF is using it. */
7818                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7819                     !(addr1_high == 0 && addr1_low == 0))
7820                         skip_mac_1 = 1;
7821         }
7822         spin_lock_bh(&tp->lock);
7823         __tg3_set_mac_addr(tp, skip_mac_1);
7824         spin_unlock_bh(&tp->lock);
7825
7826         return err;
7827 }
7828
7829 /* tp->lock is held. */
7830 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7831                            dma_addr_t mapping, u32 maxlen_flags,
7832                            u32 nic_addr)
7833 {
7834         tg3_write_mem(tp,
7835                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7836                       ((u64) mapping >> 32));
7837         tg3_write_mem(tp,
7838                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7839                       ((u64) mapping & 0xffffffff));
7840         tg3_write_mem(tp,
7841                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7842                        maxlen_flags);
7843
7844         if (!tg3_flag(tp, 5705_PLUS))
7845                 tg3_write_mem(tp,
7846                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7847                               nic_addr);
7848 }
7849
/* Write the ethtool coalescing parameters into the host coalescing
 * engine registers: the default vector's tx/rx registers (zeroed when
 * TSS/RSS move that traffic to other vectors), the stats tick, and the
 * per-MSI-X-vector register banks (stride 0x18).  Unused vector slots
 * are cleared.
 */
static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i;

	/* With TSS, vector 0 carries no tx traffic: zero its tx knobs. */
	if (!tg3_flag(tp, ENABLE_TSS)) {
		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_TXCOL_TICKS, 0);
		tw32(HOSTCC_TXMAX_FRAMES, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
	}

	/* Likewise for rx under RSS. */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_RXCOL_TICKS, 0);
		tw32(HOSTCC_RXMAX_FRAMES, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
	}

	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 val = ec->stats_block_coalesce_usecs;

		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);

		/* No link: stop the periodic stats DMA entirely. */
		if (!netif_carrier_ok(tp->dev))
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}

	/* Program each additional vector's register bank. */
	for (i = 0; i < tp->irq_cnt - 1; i++) {
		u32 reg;

		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->rx_coalesce_usecs);
		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames);
		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames_irq);

		if (tg3_flag(tp, ENABLE_TSS)) {
			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
			tw32(reg, ec->tx_coalesce_usecs);
			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames);
			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames_irq);
		}
	}

	/* Zero the banks for vectors beyond the ones in use. */
	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);

		if (tg3_flag(tp, ENABLE_TSS)) {
			tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
			tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
			tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
		}
	}
}
7918
/* tp->lock is held. */
/* Reset all ring state: disable the unused send and receive-return
 * ring control blocks in NIC SRAM, reset the mailbox registers and
 * per-vector bookkeeping, then re-program the status block addresses
 * and the active TX / RX-return ring control blocks for every
 * interrupt vector.
 */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk, txrcb, rxrcb, limit;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* Disable all transmit rings but the first.  The number of
	 * send ring control blocks present in NIC SRAM depends on the
	 * ASIC generation.
	 */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);


	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable interrupts (write 1 to vector 0's interrupt mailbox)
	 * and clear its MSI-check / consumer bookkeeping.
	 */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		/* Without TSS, vector 0 owns the one TX producer mailbox. */
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	txrcb = NIC_SRAM_SEND_RCB;
	rxrcb = NIC_SRAM_RCV_RET_RCB;

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	/* Program vector 0's TX and RX-return ring control blocks. */
	if (tnapi->tx_ring) {
		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE <<
				BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
		txrcb += TG3_BDINFO_SIZE;
	}

	if (tnapi->rx_rcb) {
		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
		rxrcb += TG3_BDINFO_SIZE;
	}

	stblk = HOSTCC_STATBLCK_RING1;

	/* Repeat the status-block / ring-control-block setup for each
	 * additional interrupt vector; each vector's status block
	 * address register pair sits 8 bytes after the previous one.
	 */
	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		if (tnapi->tx_ring) {
			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
				       (TG3_TX_RING_SIZE <<
					BDINFO_FLAGS_MAXLEN_SHIFT),
				       NIC_SRAM_TX_BUFFER_DESC);
			txrcb += TG3_BDINFO_SIZE;
		}

		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       ((tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT), 0);

		stblk += 8;
		rxrcb += TG3_BDINFO_SIZE;
	}
}
8046
/* Program the RX buffer descriptor replenish thresholds.
 *
 * The chip prefetches RX BDs into an on-board cache whose size varies
 * per ASIC generation.  The standard-ring threshold is the smaller of
 * half that cache (also capped by rx_std_max_post) and 1/8th of the
 * configured ring size, with a floor of 1.  The jumbo ring, when
 * present and enabled, gets the same treatment with its own cache
 * size.  On 57765+ parts a replenish low-water mark is programmed too.
 */
static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

	/* Pick the standard-ring BD cache size for this ASIC. */
	if (!tg3_flag(tp, 5750_PLUS) ||
	    tg3_flag(tp, 5780_CLASS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    tg3_flag(tp, 57765_PLUS))
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
	else
		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

	val = min(nic_rep_thresh, host_rep_thresh);
	tw32(RCVBDI_STD_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

	/* Done unless the jumbo ring is actually usable on this part. */
	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		return;

	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;

	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

	val = min(bdcache_maxcnt / 2, host_rep_thresh);
	tw32(RCVBDI_JUMBO_THRESH, val);

	if (tg3_flag(tp, 57765_PLUS))
		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
8085
/* Compute the bit-reflected CRC-32 (polynomial 0xedb88320, all-ones
 * seed, final inversion -- the standard Ethernet CRC) of a buffer,
 * one bit at a time.  Used to hash multicast addresses into the MAC
 * hash filter registers.
 */
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 crc = 0xffffffff;
	int i;

	for (i = 0; i < len; i++) {
		int bit;

		crc ^= buf[i];

		/* Fold in the byte, LSB first. */
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 0x01) ? 0xedb88320 : 0);
	}

	return ~crc;
}
8109
8110 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8111 {
8112         /* accept or reject all multicast frames */
8113         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8114         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8115         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8116         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8117 }
8118
/* Recompute MAC_RX_MODE and the multicast hash filter from the net
 * device's flags (promiscuous / all-multicast) and its multicast
 * address list.  The MAC_RX_MODE register is only rewritten when the
 * computed mode actually differs from the cached tp->rx_mode.
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s): hash each address's
		 * CRC down to one of the 128 bits spread over the four
		 * 32-bit MAC hash filter registers.
		 */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;		/* low 7 bits of inverted CRC */
			regidx = (bit & 0x60) >> 5;	/* which of the 4 registers */
			bit &= 0x1f;			/* bit within that register */
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch the hardware when the mode actually changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
8172
/* Fill the software RSS indirection table with the ethtool default
 * spread over irq_cnt - 1 queues (vector 0 is excluded).
 */
static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
{
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		tp->rss_ind_tbl[i] =
			ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
}
8181
8182 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8183 {
8184         int i;
8185
8186         if (!tg3_flag(tp, SUPPORT_MSIX))
8187                 return;
8188
8189         if (tp->irq_cnt <= 2) {
8190                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8191                 return;
8192         }
8193
8194         /* Validate table against current IRQ count */
8195         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8196                 if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
8197                         break;
8198         }
8199
8200         if (i != TG3_RSS_INDIR_TBL_SIZE)
8201                 tg3_rss_init_dflt_indir_tbl(tp);
8202 }
8203
/* Copy the software RSS indirection table into the MAC_RSS_INDIR_TBL
 * registers.  Eight 4-bit table entries are packed into each 32-bit
 * register, with the first entry landing in the most significant
 * nibble.
 */
static void tg3_rss_write_indir_tbl(struct tg3 *tp)
{
	int i = 0;
	u32 reg = MAC_RSS_INDIR_TBL_0;

	while (i < TG3_RSS_INDIR_TBL_SIZE) {
		u32 val = tp->rss_ind_tbl[i];
		i++;
		/* Shift in the next seven entries, stopping when i
		 * reaches the next multiple of 8.
		 */
		for (; i % 8; i++) {
			val <<= 4;
			val |= tp->rss_ind_tbl[i];
		}
		tw32(reg, val);
		reg += 4;
	}
}
8220
8221 /* tp->lock is held. */
8222 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8223 {
8224         u32 val, rdmac_mode;
8225         int i, err, limit;
8226         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8227
8228         tg3_disable_ints(tp);
8229
8230         tg3_stop_fw(tp);
8231
8232         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8233
8234         if (tg3_flag(tp, INIT_COMPLETE))
8235                 tg3_abort_hw(tp, 1);
8236
8237         /* Enable MAC control of LPI */
8238         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8239                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8240                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8241                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
8242
8243                 tw32_f(TG3_CPMU_EEE_CTRL,
8244                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8245
8246                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8247                       TG3_CPMU_EEEMD_LPI_IN_TX |
8248                       TG3_CPMU_EEEMD_LPI_IN_RX |
8249                       TG3_CPMU_EEEMD_EEE_ENABLE;
8250
8251                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8252                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8253
8254                 if (tg3_flag(tp, ENABLE_APE))
8255                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8256
8257                 tw32_f(TG3_CPMU_EEE_MODE, val);
8258
8259                 tw32_f(TG3_CPMU_EEE_DBTMR1,
8260                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8261                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8262
8263                 tw32_f(TG3_CPMU_EEE_DBTMR2,
8264                        TG3_CPMU_DBTMR2_APE_TX_2047US |
8265                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8266         }
8267
8268         if (reset_phy)
8269                 tg3_phy_reset(tp);
8270
8271         err = tg3_chip_reset(tp);
8272         if (err)
8273                 return err;
8274
8275         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8276
8277         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8278                 val = tr32(TG3_CPMU_CTRL);
8279                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8280                 tw32(TG3_CPMU_CTRL, val);
8281
8282                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8283                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8284                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8285                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8286
8287                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8288                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8289                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8290                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8291
8292                 val = tr32(TG3_CPMU_HST_ACC);
8293                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8294                 val |= CPMU_HST_ACC_MACCLK_6_25;
8295                 tw32(TG3_CPMU_HST_ACC, val);
8296         }
8297
8298         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8299                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8300                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8301                        PCIE_PWR_MGMT_L1_THRESH_4MS;
8302                 tw32(PCIE_PWR_MGMT_THRESH, val);
8303
8304                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8305                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8306
8307                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8308
8309                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8310                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8311         }
8312
8313         if (tg3_flag(tp, L1PLLPD_EN)) {
8314                 u32 grc_mode = tr32(GRC_MODE);
8315
8316                 /* Access the lower 1K of PL PCIE block registers. */
8317                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8318                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8319
8320                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8321                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8322                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8323
8324                 tw32(GRC_MODE, grc_mode);
8325         }
8326
8327         if (tg3_flag(tp, 57765_CLASS)) {
8328                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8329                         u32 grc_mode = tr32(GRC_MODE);
8330
8331                         /* Access the lower 1K of PL PCIE block registers. */
8332                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8333                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8334
8335                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8336                                    TG3_PCIE_PL_LO_PHYCTL5);
8337                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8338                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8339
8340                         tw32(GRC_MODE, grc_mode);
8341                 }
8342
8343                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8344                         u32 grc_mode = tr32(GRC_MODE);
8345
8346                         /* Access the lower 1K of DL PCIE block registers. */
8347                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8348                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8349
8350                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8351                                    TG3_PCIE_DL_LO_FTSMAX);
8352                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8353                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8354                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8355
8356                         tw32(GRC_MODE, grc_mode);
8357                 }
8358
8359                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8360                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8361                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8362                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8363         }
8364
8365         /* This works around an issue with Athlon chipsets on
8366          * B3 tigon3 silicon.  This bit has no effect on any
8367          * other revision.  But do not set this on PCI Express
8368          * chips and don't even touch the clocks if the CPMU is present.
8369          */
8370         if (!tg3_flag(tp, CPMU_PRESENT)) {
8371                 if (!tg3_flag(tp, PCI_EXPRESS))
8372                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8373                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8374         }
8375
8376         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8377             tg3_flag(tp, PCIX_MODE)) {
8378                 val = tr32(TG3PCI_PCISTATE);
8379                 val |= PCISTATE_RETRY_SAME_DMA;
8380                 tw32(TG3PCI_PCISTATE, val);
8381         }
8382
8383         if (tg3_flag(tp, ENABLE_APE)) {
8384                 /* Allow reads and writes to the
8385                  * APE register and memory space.
8386                  */
8387                 val = tr32(TG3PCI_PCISTATE);
8388                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8389                        PCISTATE_ALLOW_APE_SHMEM_WR |
8390                        PCISTATE_ALLOW_APE_PSPACE_WR;
8391                 tw32(TG3PCI_PCISTATE, val);
8392         }
8393
8394         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8395                 /* Enable some hw fixes.  */
8396                 val = tr32(TG3PCI_MSI_DATA);
8397                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8398                 tw32(TG3PCI_MSI_DATA, val);
8399         }
8400
8401         /* Descriptor ring init may make accesses to the
8402          * NIC SRAM area to setup the TX descriptors, so we
8403          * can only do this after the hardware has been
8404          * successfully reset.
8405          */
8406         err = tg3_init_rings(tp);
8407         if (err)
8408                 return err;
8409
8410         if (tg3_flag(tp, 57765_PLUS)) {
8411                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8412                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8413                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8414                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8415                 if (!tg3_flag(tp, 57765_CLASS) &&
8416                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8417                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8418                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8419         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8420                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8421                 /* This value is determined during the probe time DMA
8422                  * engine test, tg3_test_dma.
8423                  */
8424                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8425         }
8426
8427         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8428                           GRC_MODE_4X_NIC_SEND_RINGS |
8429                           GRC_MODE_NO_TX_PHDR_CSUM |
8430                           GRC_MODE_NO_RX_PHDR_CSUM);
8431         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8432
8433         /* Pseudo-header checksum is done by hardware logic and not
8434          * the offload processers, so make the chip do the pseudo-
8435          * header checksums on receive.  For transmit it is more
8436          * convenient to do the pseudo-header checksum in software
8437          * as Linux does that on transmit for us in all cases.
8438          */
8439         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8440
8441         tw32(GRC_MODE,
8442              tp->grc_mode |
8443              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8444
8445         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
8446         val = tr32(GRC_MISC_CFG);
8447         val &= ~0xff;
8448         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8449         tw32(GRC_MISC_CFG, val);
8450
8451         /* Initialize MBUF/DESC pool. */
8452         if (tg3_flag(tp, 5750_PLUS)) {
8453                 /* Do nothing.  */
8454         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8455                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8456                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8457                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8458                 else
8459                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8460                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8461                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8462         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8463                 int fw_len;
8464
8465                 fw_len = tp->fw_len;
8466                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8467                 tw32(BUFMGR_MB_POOL_ADDR,
8468                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8469                 tw32(BUFMGR_MB_POOL_SIZE,
8470                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8471         }
8472
8473         if (tp->dev->mtu <= ETH_DATA_LEN) {
8474                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8475                      tp->bufmgr_config.mbuf_read_dma_low_water);
8476                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8477                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8478                 tw32(BUFMGR_MB_HIGH_WATER,
8479                      tp->bufmgr_config.mbuf_high_water);
8480         } else {
8481                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8482                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8483                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8484                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8485                 tw32(BUFMGR_MB_HIGH_WATER,
8486                      tp->bufmgr_config.mbuf_high_water_jumbo);
8487         }
8488         tw32(BUFMGR_DMA_LOW_WATER,
8489              tp->bufmgr_config.dma_low_water);
8490         tw32(BUFMGR_DMA_HIGH_WATER,
8491              tp->bufmgr_config.dma_high_water);
8492
8493         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8494         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8495                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8496         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8497             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8498             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8499                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8500         tw32(BUFMGR_MODE, val);
8501         for (i = 0; i < 2000; i++) {
8502                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8503                         break;
8504                 udelay(10);
8505         }
8506         if (i >= 2000) {
8507                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8508                 return -ENODEV;
8509         }
8510
8511         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8512                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8513
8514         tg3_setup_rxbd_thresholds(tp);
8515
8516         /* Initialize TG3_BDINFO's at:
8517          *  RCVDBDI_STD_BD:     standard eth size rx ring
8518          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8519          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8520          *
8521          * like so:
8522          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8523          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8524          *                              ring attribute flags
8525          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
8526          *
8527          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8528          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8529          *
8530          * The size of each ring is fixed in the firmware, but the location is
8531          * configurable.
8532          */
8533         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8534              ((u64) tpr->rx_std_mapping >> 32));
8535         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8536              ((u64) tpr->rx_std_mapping & 0xffffffff));
8537         if (!tg3_flag(tp, 5717_PLUS))
8538                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8539                      NIC_SRAM_RX_BUFFER_DESC);
8540
8541         /* Disable the mini ring */
8542         if (!tg3_flag(tp, 5705_PLUS))
8543                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8544                      BDINFO_FLAGS_DISABLED);
8545
8546         /* Program the jumbo buffer descriptor ring control
8547          * blocks on those devices that have them.
8548          */
8549         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8550             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8551
8552                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8553                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8554                              ((u64) tpr->rx_jmb_mapping >> 32));
8555                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8556                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8557                         val = TG3_RX_JMB_RING_SIZE(tp) <<
8558                               BDINFO_FLAGS_MAXLEN_SHIFT;
8559                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8560                              val | BDINFO_FLAGS_USE_EXT_RECV);
8561                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8562                             tg3_flag(tp, 57765_CLASS))
8563                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8564                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8565                 } else {
8566                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8567                              BDINFO_FLAGS_DISABLED);
8568                 }
8569
8570                 if (tg3_flag(tp, 57765_PLUS)) {
8571                         val = TG3_RX_STD_RING_SIZE(tp);
8572                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8573                         val |= (TG3_RX_STD_DMA_SZ << 2);
8574                 } else
8575                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8576         } else
8577                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8578
8579         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8580
8581         tpr->rx_std_prod_idx = tp->rx_pending;
8582         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8583
8584         tpr->rx_jmb_prod_idx =
8585                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8586         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8587
8588         tg3_rings_reset(tp);
8589
8590         /* Initialize MAC address and backoff seed. */
8591         __tg3_set_mac_addr(tp, 0);
8592
8593         /* MTU + ethernet header + FCS + optional VLAN tag */
8594         tw32(MAC_RX_MTU_SIZE,
8595              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8596
8597         /* The slot time is changed by tg3_setup_phy if we
8598          * run at gigabit with half duplex.
8599          */
8600         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8601               (6 << TX_LENGTHS_IPG_SHIFT) |
8602               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8603
8604         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8605                 val |= tr32(MAC_TX_LENGTHS) &
8606                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
8607                         TX_LENGTHS_CNT_DWN_VAL_MSK);
8608
8609         tw32(MAC_TX_LENGTHS, val);
8610
8611         /* Receive rules. */
8612         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8613         tw32(RCVLPC_CONFIG, 0x0181);
8614
8615         /* Calculate RDMAC_MODE setting early, we need it to determine
8616          * the RCVLPC_STATE_ENABLE mask.
8617          */
8618         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8619                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8620                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8621                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8622                       RDMAC_MODE_LNGREAD_ENAB);
8623
8624         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8625                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8626
8627         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8628             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8629             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8630                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8631                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8632                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8633
8634         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8635             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8636                 if (tg3_flag(tp, TSO_CAPABLE) &&
8637                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8638                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8639                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8640                            !tg3_flag(tp, IS_5788)) {
8641                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8642                 }
8643         }
8644
8645         if (tg3_flag(tp, PCI_EXPRESS))
8646                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8647
8648         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
8649                 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
8650
8651         if (tg3_flag(tp, HW_TSO_1) ||
8652             tg3_flag(tp, HW_TSO_2) ||
8653             tg3_flag(tp, HW_TSO_3))
8654                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8655
8656         if (tg3_flag(tp, 57765_PLUS) ||
8657             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8658             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8659                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8660
8661         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8662                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8663
8664         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8665             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8666             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8667             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8668             tg3_flag(tp, 57765_PLUS)) {
8669                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8670                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8671                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8672                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8673                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8674                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8675                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8676                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8677                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8678                 }
8679                 tw32(TG3_RDMA_RSRVCTRL_REG,
8680                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8681         }
8682
8683         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8684             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8685                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8686                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8687                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8688                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8689         }
8690
8691         /* Receive/send statistics. */
8692         if (tg3_flag(tp, 5750_PLUS)) {
8693                 val = tr32(RCVLPC_STATS_ENABLE);
8694                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8695                 tw32(RCVLPC_STATS_ENABLE, val);
8696         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8697                    tg3_flag(tp, TSO_CAPABLE)) {
8698                 val = tr32(RCVLPC_STATS_ENABLE);
8699                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8700                 tw32(RCVLPC_STATS_ENABLE, val);
8701         } else {
8702                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8703         }
8704         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8705         tw32(SNDDATAI_STATSENAB, 0xffffff);
8706         tw32(SNDDATAI_STATSCTRL,
8707              (SNDDATAI_SCTRL_ENABLE |
8708               SNDDATAI_SCTRL_FASTUPD));
8709
8710         /* Setup host coalescing engine. */
8711         tw32(HOSTCC_MODE, 0);
8712         for (i = 0; i < 2000; i++) {
8713                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8714                         break;
8715                 udelay(10);
8716         }
8717
8718         __tg3_set_coalesce(tp, &tp->coal);
8719
8720         if (!tg3_flag(tp, 5705_PLUS)) {
8721                 /* Status/statistics block address.  See tg3_timer,
8722                  * the tg3_periodic_fetch_stats call there, and
8723                  * tg3_get_stats to see how this works for 5705/5750 chips.
8724                  */
8725                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8726                      ((u64) tp->stats_mapping >> 32));
8727                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8728                      ((u64) tp->stats_mapping & 0xffffffff));
8729                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8730
8731                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8732
8733                 /* Clear statistics and status block memory areas */
8734                 for (i = NIC_SRAM_STATS_BLK;
8735                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8736                      i += sizeof(u32)) {
8737                         tg3_write_mem(tp, i, 0);
8738                         udelay(40);
8739                 }
8740         }
8741
8742         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8743
8744         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8745         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8746         if (!tg3_flag(tp, 5705_PLUS))
8747                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8748
8749         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8750                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8751                 /* reset to prevent losing 1st rx packet intermittently */
8752                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8753                 udelay(10);
8754         }
8755
8756         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8757                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
8758                         MAC_MODE_FHDE_ENABLE;
8759         if (tg3_flag(tp, ENABLE_APE))
8760                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8761         if (!tg3_flag(tp, 5705_PLUS) &&
8762             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8763             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8764                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8765         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8766         udelay(40);
8767
8768         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8769          * If TG3_FLAG_IS_NIC is zero, we should read the
8770          * register to preserve the GPIO settings for LOMs. The GPIOs,
8771          * whether used as inputs or outputs, are set by boot code after
8772          * reset.
8773          */
8774         if (!tg3_flag(tp, IS_NIC)) {
8775                 u32 gpio_mask;
8776
8777                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8778                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8779                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8780
8781                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8782                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8783                                      GRC_LCLCTRL_GPIO_OUTPUT3;
8784
8785                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8786                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8787
8788                 tp->grc_local_ctrl &= ~gpio_mask;
8789                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8790
8791                 /* GPIO1 must be driven high for eeprom write protect */
8792                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8793                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8794                                                GRC_LCLCTRL_GPIO_OUTPUT1);
8795         }
8796         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8797         udelay(100);
8798
8799         if (tg3_flag(tp, USING_MSIX)) {
8800                 val = tr32(MSGINT_MODE);
8801                 val |= MSGINT_MODE_ENABLE;
8802                 if (tp->irq_cnt > 1)
8803                         val |= MSGINT_MODE_MULTIVEC_EN;
8804                 if (!tg3_flag(tp, 1SHOT_MSI))
8805                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
8806                 tw32(MSGINT_MODE, val);
8807         }
8808
8809         if (!tg3_flag(tp, 5705_PLUS)) {
8810                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8811                 udelay(40);
8812         }
8813
8814         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8815                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8816                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8817                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8818                WDMAC_MODE_LNGREAD_ENAB);
8819
8820         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8821             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8822                 if (tg3_flag(tp, TSO_CAPABLE) &&
8823                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8824                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8825                         /* nothing */
8826                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8827                            !tg3_flag(tp, IS_5788)) {
8828                         val |= WDMAC_MODE_RX_ACCEL;
8829                 }
8830         }
8831
8832         /* Enable host coalescing bug fix */
8833         if (tg3_flag(tp, 5755_PLUS))
8834                 val |= WDMAC_MODE_STATUS_TAG_FIX;
8835
8836         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8837                 val |= WDMAC_MODE_BURST_ALL_DATA;
8838
8839         tw32_f(WDMAC_MODE, val);
8840         udelay(40);
8841
8842         if (tg3_flag(tp, PCIX_MODE)) {
8843                 u16 pcix_cmd;
8844
8845                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8846                                      &pcix_cmd);
8847                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8848                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8849                         pcix_cmd |= PCI_X_CMD_READ_2K;
8850                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8851                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8852                         pcix_cmd |= PCI_X_CMD_READ_2K;
8853                 }
8854                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8855                                       pcix_cmd);
8856         }
8857
8858         tw32_f(RDMAC_MODE, rdmac_mode);
8859         udelay(40);
8860
8861         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8862         if (!tg3_flag(tp, 5705_PLUS))
8863                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8864
8865         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8866                 tw32(SNDDATAC_MODE,
8867                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8868         else
8869                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8870
8871         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8872         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8873         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8874         if (tg3_flag(tp, LRG_PROD_RING_CAP))
8875                 val |= RCVDBDI_MODE_LRG_RING_SZ;
8876         tw32(RCVDBDI_MODE, val);
8877         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8878         if (tg3_flag(tp, HW_TSO_1) ||
8879             tg3_flag(tp, HW_TSO_2) ||
8880             tg3_flag(tp, HW_TSO_3))
8881                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8882         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8883         if (tg3_flag(tp, ENABLE_TSS))
8884                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8885         tw32(SNDBDI_MODE, val);
8886         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8887
8888         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8889                 err = tg3_load_5701_a0_firmware_fix(tp);
8890                 if (err)
8891                         return err;
8892         }
8893
8894         if (tg3_flag(tp, TSO_CAPABLE)) {
8895                 err = tg3_load_tso_firmware(tp);
8896                 if (err)
8897                         return err;
8898         }
8899
8900         tp->tx_mode = TX_MODE_ENABLE;
8901
8902         if (tg3_flag(tp, 5755_PLUS) ||
8903             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8904                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8905
8906         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8907                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8908                 tp->tx_mode &= ~val;
8909                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8910         }
8911
8912         tw32_f(MAC_TX_MODE, tp->tx_mode);
8913         udelay(100);
8914
8915         if (tg3_flag(tp, ENABLE_RSS)) {
8916                 tg3_rss_write_indir_tbl(tp);
8917
8918                 /* Setup the "secret" hash key. */
8919                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8920                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8921                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8922                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8923                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8924                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8925                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8926                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8927                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8928                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8929         }
8930
8931         tp->rx_mode = RX_MODE_ENABLE;
8932         if (tg3_flag(tp, 5755_PLUS))
8933                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8934
8935         if (tg3_flag(tp, ENABLE_RSS))
8936                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8937                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
8938                                RX_MODE_RSS_IPV6_HASH_EN |
8939                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
8940                                RX_MODE_RSS_IPV4_HASH_EN |
8941                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
8942
8943         tw32_f(MAC_RX_MODE, tp->rx_mode);
8944         udelay(10);
8945
8946         tw32(MAC_LED_CTRL, tp->led_ctrl);
8947
8948         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8949         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8950                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8951                 udelay(10);
8952         }
8953         tw32_f(MAC_RX_MODE, tp->rx_mode);
8954         udelay(10);
8955
8956         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8957                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8958                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8959                         /* Set drive transmission level to 1.2V  */
8960                         /* only if the signal pre-emphasis bit is not set  */
8961                         val = tr32(MAC_SERDES_CFG);
8962                         val &= 0xfffff000;
8963                         val |= 0x880;
8964                         tw32(MAC_SERDES_CFG, val);
8965                 }
8966                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8967                         tw32(MAC_SERDES_CFG, 0x616000);
8968         }
8969
8970         /* Prevent chip from dropping frames when flow control
8971          * is enabled.
8972          */
8973         if (tg3_flag(tp, 57765_CLASS))
8974                 val = 1;
8975         else
8976                 val = 2;
8977         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8978
8979         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8980             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8981                 /* Use hardware link auto-negotiation */
8982                 tg3_flag_set(tp, HW_AUTONEG);
8983         }
8984
8985         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8986             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
8987                 u32 tmp;
8988
8989                 tmp = tr32(SERDES_RX_CTRL);
8990                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8991                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8992                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8993                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8994         }
8995
8996         if (!tg3_flag(tp, USE_PHYLIB)) {
8997                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8998                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8999                         tp->link_config.speed = tp->link_config.orig_speed;
9000                         tp->link_config.duplex = tp->link_config.orig_duplex;
9001                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
9002                 }
9003
9004                 err = tg3_setup_phy(tp, 0);
9005                 if (err)
9006                         return err;
9007
9008                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9009                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9010                         u32 tmp;
9011
9012                         /* Clear CRC stats. */
9013                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9014                                 tg3_writephy(tp, MII_TG3_TEST1,
9015                                              tmp | MII_TG3_TEST1_CRC_EN);
9016                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9017                         }
9018                 }
9019         }
9020
9021         __tg3_set_rx_mode(tp->dev);
9022
9023         /* Initialize receive rules. */
9024         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9025         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9026         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9027         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9028
9029         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9030                 limit = 8;
9031         else
9032                 limit = 16;
9033         if (tg3_flag(tp, ENABLE_ASF))
9034                 limit -= 4;
9035         switch (limit) {
9036         case 16:
9037                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9038         case 15:
9039                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9040         case 14:
9041                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9042         case 13:
9043                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9044         case 12:
9045                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9046         case 11:
9047                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9048         case 10:
9049                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9050         case 9:
9051                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9052         case 8:
9053                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9054         case 7:
9055                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9056         case 6:
9057                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9058         case 5:
9059                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9060         case 4:
9061                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9062         case 3:
9063                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9064         case 2:
9065         case 1:
9066
9067         default:
9068                 break;
9069         }
9070
9071         if (tg3_flag(tp, ENABLE_APE))
9072                 /* Write our heartbeat update interval to APE. */
9073                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9074                                 APE_HOST_HEARTBEAT_INT_DISABLE);
9075
9076         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9077
9078         return 0;
9079 }
9080
9081 /* Called at device open time to get the chip ready for
9082  * packet processing.  Invoked with tp->lock held.
9083  */
9084 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9085 {
             /* Switch chip clocks before any other register setup. */
9086         tg3_switch_clocks(tp);
9087
             /* Zero the PCI memory-window base so subsequent windowed
              * register/SRAM accesses start from a known offset.
              */
9088         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9089
             /* tg3_reset_hw does the full reset + reprogram sequence;
              * its return value is the result of this function.
              */
9090         return tg3_reset_hw(tp, reset_phy);
9091 }
9092
9093 /* Restart hardware after configuration changes, self-test, etc.
9094  * Invoked with tp->lock held.
9095  */
9096 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
9097         __releases(tp->lock)
9098         __acquires(tp->lock)
9099 {
9100         int err;
9101
9102         err = tg3_init_hw(tp, reset_phy);
9103         if (err) {
                     /* Re-init failed: halt the chip, then temporarily drop
                      * tp->lock (as the __releases/__acquires annotations
                      * advertise) so we can stop the timer and close the
                      * device, both of which may sleep.  The lock is
                      * reacquired before returning so the caller's locking
                      * assumptions still hold.
                      */
9104                 netdev_err(tp->dev,
9105                            "Failed to re-initialize device, aborting\n");
9106                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9107                 tg3_full_unlock(tp);
9108                 del_timer_sync(&tp->timer);
9109                 tp->irq_sync = 0;
9110                 tg3_napi_enable(tp);
9111                 dev_close(tp->dev);
9112                 tg3_full_lock(tp, 0);
9113         }
9114         return err;
9115 }
9116
     /* Workqueue handler that performs a full chip halt + re-init after an
      * error condition.  Scheduled via tg3_reset_task_schedule(); clears
      * RESET_TASK_PENDING on every exit path.
      */
9117 static void tg3_reset_task(struct work_struct *work)
9118 {
9119         struct tg3 *tp = container_of(work, struct tg3, reset_task);
9120         int err;
9121
9122         tg3_full_lock(tp, 0);
9123
             /* Device was closed before the work item ran; nothing to do. */
9124         if (!netif_running(tp->dev)) {
9125                 tg3_flag_clear(tp, RESET_TASK_PENDING);
9126                 tg3_full_unlock(tp);
9127                 return;
9128         }
9129
             /* Drop the lock for the phy/netif stop calls, which may sleep. */
9130         tg3_full_unlock(tp);
9131
9132         tg3_phy_stop(tp);
9133
9134         tg3_netif_stop(tp);
9135
9136         tg3_full_lock(tp, 1);
9137
9138         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
                     /* Switch both mailbox writers to the flushing variants
                      * and record MBOX_WRITE_REORDER -- NOTE(review): this
                      * looks like a workaround for posted-write reordering
                      * detected on the tx path; confirm against the code
                      * that sets TX_RECOVERY_PENDING.
                      */
9139                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9140                 tp->write32_rx_mbox = tg3_write_flush_reg32;
9141                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
9142                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
9143         }
9144
             /* Full shutdown-style halt, then re-init with reset_phy = 1. */
9145         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
9146         err = tg3_init_hw(tp, 1);
9147         if (err)
9148                 goto out;
9149
9150         tg3_netif_start(tp);
9151
9152 out:
9153         tg3_full_unlock(tp);
9154
             /* Restart the PHY only if the hardware came back cleanly. */
9155         if (!err)
9156                 tg3_phy_start(tp);
9157
9158         tg3_flag_clear(tp, RESET_TASK_PENDING);
9159 }
9160
     /* Accumulate a 32-bit hardware counter register REG into the 64-bit
      * software stat PSTAT (split into ->low / ->high halves), bumping the
      * high word when the low-word addition wraps around.
      */
9161 #define TG3_STAT_ADD32(PSTAT, REG) \
9162 do {    u32 __val = tr32(REG); \
9163         (PSTAT)->low += __val; \
9164         if ((PSTAT)->low < __val) \
9165                 (PSTAT)->high += 1; \
9166 } while (0)
9167
     /* Periodically fold the chip's 32-bit MAC statistics registers into the
      * 64-bit counters in tp->hw_stats.  Called from the driver timer (see
      * tg3_timer); skipped entirely while the link is down.
      */
9168 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9169 {
9170         struct tg3_hw_stats *sp = tp->hw_stats;
9171
9172         if (!netif_carrier_ok(tp->dev))
9173                 return;
9174
             /* Transmit-side counters. */
9175         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9176         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9177         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9178         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9179         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9180         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9181         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9182         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9183         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9184         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9185         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9186         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9187         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9188
             /* Receive-side counters. */
9189         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9190         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9191         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9192         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9193         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9194         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9195         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9196         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9197         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9198         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9199         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9200         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9201         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9202         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9203
9204         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
             /* On 5717-class and the A0 steppings of 5719/5720 the discard
              * count is not read from RCVLPC_IN_DISCARDS_CNT; instead the
              * MBUF low-watermark attention bit is sampled (and acked) as a
              * 0/1 discard event per poll, and mirrored into
              * mbuf_lwm_thresh_hit.
              */
9205         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9206             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9207             tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9208                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9209         } else {
9210                 u32 val = tr32(HOSTCC_FLOW_ATTN);
9211                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9212                 if (val) {
                             /* Write-1-to-clear the attention bit, then
                              * bump the 64-bit discard counter with carry.
                              */
9213                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9214                         sp->rx_discards.low += val;
9215                         if (sp->rx_discards.low < val)
9216                                 sp->rx_discards.high += 1;
9217                 }
9218                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9219         }
9220         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9221 }
9222
     /* Detect a lost MSI: for each vector, if work is pending but neither
      * the rx nor tx consumer index has advanced since the previous timer
      * tick, allow one grace tick (chk_msi_cnt) and then invoke the MSI
      * handler directly to kick processing along.
      */
9223 static void tg3_chk_missed_msi(struct tg3 *tp)
9224 {
9225         u32 i;
9226
9227         for (i = 0; i < tp->irq_cnt; i++) {
9228                 struct tg3_napi *tnapi = &tp->napi[i];
9229
9230                 if (tg3_has_work(tnapi)) {
                             /* No progress since the last poll? */
9231                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9232                             tnapi->last_tx_cons == tnapi->tx_cons) {
9233                                 if (tnapi->chk_msi_cnt < 1) {
                                             /* First stalled tick: wait one
                                              * more interval before acting.
                                              * Note this returns without
                                              * checking the remaining
                                              * vectors.
                                              */
9234                                         tnapi->chk_msi_cnt++;
9235                                         return;
9236                                 }
                                     /* Still stalled: simulate the missed
                                      * interrupt by calling the handler.
                                      */
9237                                 tg3_msi(0, tnapi);
9238                         }
9239                 }
                     /* Progress was made (or no work): reset the stall
                      * counter and snapshot current consumer indices for
                      * the next comparison.
                      */
9240                 tnapi->chk_msi_cnt = 0;
9241                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9242                 tnapi->last_tx_cons = tnapi->tx_cons;
9243         }
9244 }
9245
9246 static void tg3_timer(unsigned long __opaque)
9247 {
9248         struct tg3 *tp = (struct tg3 *) __opaque;
9249
9250         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9251                 goto restart_timer;
9252
9253         spin_lock(&tp->lock);
9254
9255         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9256             tg3_flag(tp, 57765_CLASS))
9257                 tg3_chk_missed_msi(tp);
9258
9259         if (!tg3_flag(tp, TAGGED_STATUS)) {
9260                 /* All of this garbage is because when using non-tagged
9261                  * IRQ status the mailbox/status_block protocol the chip
9262                  * uses with the cpu is race prone.
9263                  */
9264                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9265                         tw32(GRC_LOCAL_CTRL,
9266                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9267                 } else {
9268                         tw32(HOSTCC_MODE, tp->coalesce_mode |
9269                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9270                 }
9271
9272                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9273                         spin_unlock(&tp->lock);
9274                         tg3_reset_task_schedule(tp);
9275                         goto restart_timer;
9276                 }
9277         }
9278
9279         /* This part only runs once per second. */
9280         if (!--tp->timer_counter) {
9281                 if (tg3_flag(tp, 5705_PLUS))
9282                         tg3_periodic_fetch_stats(tp);
9283
9284                 if (tp->setlpicnt && !--tp->setlpicnt)
9285                         tg3_phy_eee_enable(tp);
9286
9287                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9288                         u32 mac_stat;
9289                         int phy_event;
9290
9291                         mac_stat = tr32(MAC_STATUS);
9292
9293                         phy_event = 0;
9294                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9295                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9296                                         phy_event = 1;
9297                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9298                                 phy_event = 1;
9299
9300                         if (phy_event)
9301                                 tg3_setup_phy(tp, 0);
9302                 } else if (tg3_flag(tp, POLL_SERDES)) {
9303                         u32 mac_stat = tr32(MAC_STATUS);
9304                         int need_setup = 0;
9305
9306                         if (netif_carrier_ok(tp->dev) &&
9307                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9308                                 need_setup = 1;
9309                         }
9310                         if (!netif_carrier_ok(tp->dev) &&
9311                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
9312                                          MAC_STATUS_SIGNAL_DET))) {
9313                                 need_setup = 1;
9314                         }
9315                         if (need_setup) {
9316                                 if (!tp->serdes_counter) {
9317                                         tw32_f(MAC_MODE,
9318                                              (tp->mac_mode &
9319                                               ~MAC_MODE_PORT_MODE_MASK));
9320                                         udelay(40);
9321                                         tw32_f(MAC_MODE, tp->mac_mode);
9322                                         udelay(40);
9323                                 }
9324                                 tg3_setup_phy(tp, 0);
9325                         }
9326                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9327                            tg3_flag(tp, 5780_CLASS)) {
9328                         tg3_serdes_parallel_detect(tp);
9329                 }
9330
9331                 tp->timer_counter = tp->timer_multiplier;
9332         }
9333
9334         /* Heartbeat is only sent once every 2 seconds.
9335          *
9336          * The heartbeat is to tell the ASF firmware that the host
9337          * driver is still alive.  In the event that the OS crashes,
9338          * ASF needs to reset the hardware to free up the FIFO space
9339          * that may be filled with rx packets destined for the host.
9340          * If the FIFO is full, ASF will no longer function properly.
9341          *
9342          * Unintended resets have been reported on real time kernels
9343          * where the timer doesn't run on time.  Netpoll will also have
9344          * same problem.
9345          *
9346          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9347          * to check the ring condition when the heartbeat is expiring
9348          * before doing the reset.  This will prevent most unintended
9349          * resets.
9350          */
9351         if (!--tp->asf_counter) {
9352                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9353                         tg3_wait_for_event_ack(tp);
9354
9355                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9356                                       FWCMD_NICDRV_ALIVE3);
9357                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9358                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9359                                       TG3_FW_UPDATE_TIMEOUT_SEC);
9360
9361                         tg3_generate_fw_event(tp);
9362                 }
9363                 tp->asf_counter = tp->asf_multiplier;
9364         }
9365
9366         spin_unlock(&tp->lock);
9367
9368 restart_timer:
9369         tp->timer.expires = jiffies + tp->timer_offset;
9370         add_timer(&tp->timer);
9371 }
9372
9373 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9374 {
9375         irq_handler_t fn;
9376         unsigned long flags;
9377         char *name;
9378         struct tg3_napi *tnapi = &tp->napi[irq_num];
9379
9380         if (tp->irq_cnt == 1)
9381                 name = tp->dev->name;
9382         else {
9383                 name = &tnapi->irq_lbl[0];
9384                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9385                 name[IFNAMSIZ-1] = 0;
9386         }
9387
9388         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9389                 fn = tg3_msi;
9390                 if (tg3_flag(tp, 1SHOT_MSI))
9391                         fn = tg3_msi_1shot;
9392                 flags = 0;
9393         } else {
9394                 fn = tg3_interrupt;
9395                 if (tg3_flag(tp, TAGGED_STATUS))
9396                         fn = tg3_interrupt_tagged;
9397                 flags = IRQF_SHARED;
9398         }
9399
9400         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9401 }
9402
/* Verify that the NIC can actually deliver an interrupt to the host.
 *
 * The installed handler on vector 0 is temporarily replaced with
 * tg3_test_isr, the host coalescing engine is kicked to force an
 * interrupt, and the interrupt mailbox is polled (up to 5 x 10ms) to
 * see whether the ISR fired.  The normal handler is reinstalled before
 * returning.
 *
 * Returns 0 if an interrupt was observed, -EIO if not, -ENODEV if the
 * device is down, or a negative errno from request_irq().
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Force the coalescing engine to raise an interrupt now. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		/* A non-zero mailbox value or a masked PCI interrupt
		 * means the test ISR ran.
		 */
		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/* Reinstall the normal handler regardless of the outcome. */
	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
9476
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	/* Nothing to test when the device is not using MSI. */
	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the original PCI command word (SERR included). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	/* Re-register the handler on the legacy INTx vector. */
	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
9537
9538 static int tg3_request_firmware(struct tg3 *tp)
9539 {
9540         const __be32 *fw_data;
9541
9542         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9543                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9544                            tp->fw_needed);
9545                 return -ENOENT;
9546         }
9547
9548         fw_data = (void *)tp->fw->data;
9549
9550         /* Firmware blob starts with version numbers, followed by
9551          * start address and _full_ length including BSS sections
9552          * (which must be longer than the actual data, of course
9553          */
9554
9555         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
9556         if (tp->fw_len < (tp->fw->size - 12)) {
9557                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9558                            tp->fw_len, tp->fw_needed);
9559                 release_firmware(tp->fw);
9560                 tp->fw = NULL;
9561                 return -EINVAL;
9562         }
9563
9564         /* We no longer need firmware; we have it. */
9565         tp->fw_needed = NULL;
9566         return 0;
9567 }
9568
/* Try to switch the device into MSI-X mode with roughly one rx ring
 * per online CPU (plus one vector, since vector 0 handles only link
 * and error events in multiqueue mode), capped at tp->irq_max.
 *
 * On success tp->irq_cnt and the per-vector napi[].irq_vec fields are
 * populated, the real rx/tx queue counts are published to the stack,
 * and the RSS/TSS flags are set as appropriate.  Returns true on
 * success, false if MSI-X could not be enabled (caller falls back to
 * MSI or INTx).
 */
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc;
	/* NOTE(review): VLA sized by tp->irq_max — assumed small and
	 * driver-bounded; confirm before reusing this pattern.
	 */
	struct msix_entry msix_ent[tp->irq_max];

	tp->irq_cnt = num_online_cpus();
	if (tp->irq_cnt > 1) {
		/* We want as many rx rings enabled as there are cpus.
		 * In multiqueue MSI-X mode, the first MSI-X vector
		 * only deals with link interrupts, etc, so we add
		 * one to the number of vectors we are requesting.
		 */
		tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
	}

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		/* A positive return means only rc vectors are
		 * available; retry with the reduced count.
		 */
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	netif_set_real_num_tx_queues(tp->dev, 1);
	rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
	if (netif_set_real_num_rx_queues(tp->dev, rc)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	if (tp->irq_cnt > 1) {
		tg3_flag_set(tp, ENABLE_RSS);

		/* 5719/5720 additionally support per-queue tx (TSS). */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
			tg3_flag_set(tp, ENABLE_TSS);
			netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
		}
	}

	return true;
}
9622
/* Select and enable the interrupt delivery mode (MSI-X, MSI, or
 * legacy INTx) and program MSGINT_MODE accordingly.  Falls back to a
 * single INTx vector with one rx/tx queue when neither MSI variant
 * can be used.
 */
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		/* MSI and INTx both use a single vector / single queue. */
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
9656
9657 static void tg3_ints_fini(struct tg3 *tp)
9658 {
9659         if (tg3_flag(tp, USING_MSIX))
9660                 pci_disable_msix(tp->pdev);
9661         else if (tg3_flag(tp, USING_MSI))
9662                 pci_disable_msi(tp->pdev);
9663         tg3_flag_clear(tp, USING_MSI);
9664         tg3_flag_clear(tp, USING_MSIX);
9665         tg3_flag_clear(tp, ENABLE_RSS);
9666         tg3_flag_clear(tp, ENABLE_TSS);
9667 }
9668
/* net_device_ops .ndo_open: bring the interface fully up.
 *
 * Loads firmware if required, powers the chip up, configures interrupt
 * vectors and NAPI contexts, allocates descriptor rings, initializes
 * the hardware, verifies MSI delivery, and finally arms the driver
 * timer and starts the tx queues.  On failure each completed step is
 * unwound in reverse order via the err_out* labels.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, err;

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
			/* 5701 A0 cannot operate without its firmware. */
			if (err)
				return err;
		} else if (err) {
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	netif_carrier_off(tp->dev);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto err_out1;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			/* Release the vectors registered so far. */
			for (i--; i >= 0; i--) {
				tnapi = &tp->napi[i];
				free_irq(tnapi->irq_vec, tnapi);
			}
			goto err_out2;
		}
	}

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		/* Tagged status allows a slower 1s poll; otherwise the
		 * timer must run every 100ms.
		 */
		if (tg3_flag(tp, TAGGED_STATUS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
		    !tg3_flag(tp, 57765_CLASS))
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 2);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err)
		goto err_out3;

	if (tg3_flag(tp, USING_MSI)) {
		/* Confirm that MSI interrupts are actually delivered;
		 * tg3_test_msi() falls back to INTx on its own.
		 */
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto err_out2;
		}

		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_full_lock(tp, 0);

	add_timer(&tp->timer);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

err_out3:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

err_out2:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

err_out1:
	tg3_ints_fini(tp);
	tg3_frob_aux_power(tp, false);
	pci_set_power_state(tp->pdev, PCI_D3hot);
	return err;
}
9821
/* net_device_ops .ndo_stop: tear the interface down in the reverse
 * order of tg3_open() — halt NAPI/timers first, then the hardware,
 * then free IRQs, rings, and DMA memory before dropping to low power.
 */
static int tg3_close(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	tg3_napi_disable(tp);
	tg3_reset_task_cancel(tp);

	netif_tx_stop_all_queues(dev);

	del_timer_sync(&tp->timer);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	/* Clear stats across close / open calls */
	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);

	tg3_power_down(tp);

	netif_carrier_off(tp->dev);

	return 0;
}
9867
9868 static inline u64 get_stat64(tg3_stat64_t *val)
9869 {
9870        return ((u64)val->high << 32) | ((u64)val->low);
9871 }
9872
/* Return the cumulative rx CRC error count.
 *
 * On 5700/5701 with a copper PHY the count is taken from the PHY's
 * test-register counter and accumulated into tp->phy_crc_errors;
 * otherwise the hardware statistics block's FCS error counter is used
 * directly.
 */
static u64 calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			/* Enable CRC counting, then fetch the PHY's
			 * error counter.  NOTE(review): counter appears
			 * to report errors since the last read — the
			 * running total is kept in tp->phy_crc_errors;
			 * confirm against the PHY datasheet.
			 */
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
9898
/* Add the live hardware counter for @member to the total saved across
 * the last close (old_estats) and store the sum into @estats.
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)

/* Fill @estats with the complete ethtool statistics set: each entry is
 * the current hardware statistics block value plus the total recorded
 * before the last chip reset.  Returns the previously saved snapshot
 * unchanged if the hardware statistics block is not mapped.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp,
					       struct tg3_ethtool_stats *estats)
{
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);

	return estats;
}
9992
/* net_device_ops .ndo_get_stats64: aggregate the hardware counters
 * into the standard rtnl_link_stats64 layout, adding the totals saved
 * across the last close (net_stats_prev).  Returns the saved snapshot
 * unchanged when the hardware statistics block is not mapped.
 */
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from the PHY on some chips. */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	/* Software-maintained drop counters, not hardware ones. */
	stats->rx_dropped = tp->rx_dropped;
	stats->tx_dropped = tp->tx_dropped;

	return stats;
}
10055
/* ethtool_ops .get_regs_len: size of the register dump buffer. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}
10060
/* ethtool_ops .get_regs: copy a snapshot of the legacy register block
 * into @_p.  The buffer is zeroed first; nothing is read while the
 * chip is in low-power state.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	/* Registers are not accessible while in low power. */
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}
10079
/* ethtool_ops .get_eeprom_len: size of the NVRAM exposed via ethtool. */
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
10086
10087 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10088 {
10089         struct tg3 *tp = netdev_priv(dev);
10090         int ret;
10091         u8  *pd;
10092         u32 i, offset, len, b_offset, b_count;
10093         __be32 val;
10094
10095         if (tg3_flag(tp, NO_NVRAM))
10096                 return -EINVAL;
10097
10098         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10099                 return -EAGAIN;
10100
10101         offset = eeprom->offset;
10102         len = eeprom->len;
10103         eeprom->len = 0;
10104
10105         eeprom->magic = TG3_EEPROM_MAGIC;
10106
10107         if (offset & 3) {
10108                 /* adjustments to start on required 4 byte boundary */
10109                 b_offset = offset & 3;
10110                 b_count = 4 - b_offset;
10111                 if (b_count > len) {
10112                         /* i.e. offset=1 len=2 */
10113                         b_count = len;
10114                 }
10115                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10116                 if (ret)
10117                         return ret;
10118                 memcpy(data, ((char *)&val) + b_offset, b_count);
10119                 len -= b_count;
10120                 offset += b_count;
10121                 eeprom->len += b_count;
10122         }
10123
10124         /* read bytes up to the last 4 byte boundary */
10125         pd = &data[eeprom->len];
10126         for (i = 0; i < (len - (len & 3)); i += 4) {
10127                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10128                 if (ret) {
10129                         eeprom->len += i;
10130                         return ret;
10131                 }
10132                 memcpy(pd + i, &val, 4);
10133         }
10134         eeprom->len += i;
10135
10136         if (len & 3) {
10137                 /* read last bytes not ending on 4 byte boundary */
10138                 pd = &data[eeprom->len];
10139                 b_count = len & 3;
10140                 b_offset = offset + len - b_count;
10141                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10142                 if (ret)
10143                         return ret;
10144                 memcpy(pd, &val, b_count);
10145                 eeprom->len += b_count;
10146         }
10147         return 0;
10148 }
10149
10150 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
10151
/* ethtool_ops .set_eeprom: write @eeprom->len bytes at @eeprom->offset.
 *
 * NVRAM is written in whole 4-byte words, so an unaligned head or tail
 * is first read back and merged into a temporary bounce buffer to
 * preserve the neighbouring bytes.  Returns 0 on success or a negative
 * errno.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start, end;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	/* Callers must echo back the magic read via get_eeprom. */
	if (tg3_flag(tp, NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		/* Merge the caller's bytes with the preserved head/tail
		 * words in a word-aligned bounce buffer.
		 */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
10210
/* tg3_get_settings() - ethtool "get link settings" handler.
 *
 * When the PHY is driven through phylib, the query is delegated to the
 * attached phy_device.  Otherwise the settings are synthesized from the
 * driver's own link_config state and phy_flags.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		/* -EAGAIN until the PHY has actually been connected. */
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_gset(phydev, cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	/* Gigabit modes unless the PHY is 10/100-only. */
	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	/* Copper vs. SerDes determines port type and 10/100 support. */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		/* Map the rx/tx flow-control configuration onto the
		 * standard Pause/Asym_Pause advertisement bits.
		 */
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				cmd->advertising |= ADVERTISED_Pause;
			} else {
				cmd->advertising |= ADVERTISED_Pause |
						    ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			cmd->advertising |= ADVERTISED_Asym_Pause;
		}
	}
	if (netif_running(dev) && netif_carrier_ok(dev)) {
		/* Link is up: report the negotiated state. */
		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
		cmd->duplex = tp->link_config.active_duplex;
		cmd->lp_advertising = tp->link_config.rmt_adv;
		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
				cmd->eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->eth_tp_mdix = ETH_TP_MDI;
		}
	} else {
		/* No carrier: speed/duplex/MDI-X are unknown. */
		ethtool_cmd_speed_set(cmd, SPEED_INVALID);
		cmd->duplex = DUPLEX_INVALID;
		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
	}
	cmd->phy_address = tp->phy_addr;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
10276
/* tg3_set_settings() - ethtool "set link settings" handler (ethtool -s).
 *
 * With phylib the request is delegated to the attached phy_device.
 * Otherwise the requested autoneg/speed/duplex/advertising values are
 * validated against the PHY's capabilities, stored in link_config and
 * applied by reconfiguring the PHY if the interface is up.
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_sset(phydev, cmd);
	}

	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	/* A forced link needs an explicit, valid duplex setting. */
	if (cmd->autoneg == AUTONEG_DISABLE &&
	    cmd->duplex != DUPLEX_FULL &&
	    cmd->duplex != DUPLEX_HALF)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		/* Build the mask of advertisement bits this PHY can
		 * actually honor.
		 */
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		/* Reject anything the hardware cannot advertise. */
		if (cmd->advertising & ~mask)
			return -EINVAL;

		/* Keep only the speed/duplex bits for storage below. */
		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		cmd->advertising &= mask;
	} else {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			/* SerDes links only run at 1000/full. */
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		/* Autoneg chooses speed/duplex; mark them unknown. */
		tp->link_config.speed = SPEED_INVALID;
		tp->link_config.duplex = DUPLEX_INVALID;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->duplex;
	}

	/* Remember these as the values to restore after a reset. */
	tp->link_config.orig_speed = tp->link_config.speed;
	tp->link_config.orig_duplex = tp->link_config.duplex;
	tp->link_config.orig_autoneg = tp->link_config.autoneg;

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}
10367
10368 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10369 {
10370         struct tg3 *tp = netdev_priv(dev);
10371
10372         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
10373         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
10374         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
10375         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
10376 }
10377
10378 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10379 {
10380         struct tg3 *tp = netdev_priv(dev);
10381
10382         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10383                 wol->supported = WAKE_MAGIC;
10384         else
10385                 wol->supported = 0;
10386         wol->wolopts = 0;
10387         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10388                 wol->wolopts = WAKE_MAGIC;
10389         memset(&wol->sopass, 0, sizeof(wol->sopass));
10390 }
10391
10392 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10393 {
10394         struct tg3 *tp = netdev_priv(dev);
10395         struct device *dp = &tp->pdev->dev;
10396
10397         if (wol->wolopts & ~WAKE_MAGIC)
10398                 return -EINVAL;
10399         if ((wol->wolopts & WAKE_MAGIC) &&
10400             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10401                 return -EINVAL;
10402
10403         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10404
10405         spin_lock_bh(&tp->lock);
10406         if (device_may_wakeup(dp))
10407                 tg3_flag_set(tp, WOL_ENABLE);
10408         else
10409                 tg3_flag_clear(tp, WOL_ENABLE);
10410         spin_unlock_bh(&tp->lock);
10411
10412         return 0;
10413 }
10414
10415 static u32 tg3_get_msglevel(struct net_device *dev)
10416 {
10417         struct tg3 *tp = netdev_priv(dev);
10418         return tp->msg_enable;
10419 }
10420
10421 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10422 {
10423         struct tg3 *tp = netdev_priv(dev);
10424         tp->msg_enable = value;
10425 }
10426
/* tg3_nway_reset() - ethtool -r: restart link autonegotiation.
 *
 * Only valid while the interface is up and not on a SerDes PHY.  With
 * phylib the restart is delegated to phy_start_aneg(); otherwise BMCR
 * is poked directly under the driver lock.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		/* NOTE(review): BMCR is read twice back-to-back; the
		 * first read's result is discarded, which looks like a
		 * deliberate dummy read — confirm before simplifying.
		 */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			/* Restart autoneg (and force-enable it, for the
			 * parallel-detect case).
			 */
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
10460
10461 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10462 {
10463         struct tg3 *tp = netdev_priv(dev);
10464
10465         ering->rx_max_pending = tp->rx_std_ring_mask;
10466         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10467                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10468         else
10469                 ering->rx_jumbo_max_pending = 0;
10470
10471         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10472
10473         ering->rx_pending = tp->rx_pending;
10474         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10475                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10476         else
10477                 ering->rx_jumbo_pending = 0;
10478
10479         ering->tx_pending = tp->napi[0].tx_pending;
10480 }
10481
/* tg3_set_ringparam() - ethtool -G: change ring sizes.
 *
 * Validates the requested RX/jumbo/TX ring sizes against the hardware
 * limits, then (if the interface is up) stops traffic, applies the new
 * sizes and restarts the hardware.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;

	/* TX ring must always have room for a maximally-fragmented skb
	 * (3x headroom on TSO_BUG chips).
	 */
	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips can only post up to 64 standard RX descriptors. */
	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	/* The same TX ring size applies to every TX queue. */
	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	/* Apply by resetting and re-initializing the chip. */
	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
10527
10528 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10529 {
10530         struct tg3 *tp = netdev_priv(dev);
10531
10532         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10533
10534         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
10535                 epause->rx_pause = 1;
10536         else
10537                 epause->rx_pause = 0;
10538
10539         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
10540                 epause->tx_pause = 1;
10541         else
10542                 epause->tx_pause = 0;
10543 }
10544
/* tg3_set_pauseparam() - ethtool -A: configure flow control.
 *
 * In phylib mode the new pause advertisement is pushed to the PHY and,
 * when autoneg is active, the link is renegotiated so the partner
 * learns the new settings.  In the non-phylib path the flowctrl bits
 * are stored and the hardware is fully restarted if the interface is
 * up.  Returns 0 or a negative errno.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

		/* Asymmetric rx/tx pause needs Asym_Pause support. */
		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		/* Translate the rx/tx enables into flowctrl bits plus
		 * the matching Pause/Asym_Pause advertisement.
		 */
		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			/* PHY not attached yet: stash the advertisement
			 * to be applied when it is connected.
			 */
			tp->link_config.orig_advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.orig_advertising |= newadv;
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		/* Apply by resetting and re-initializing the chip. */
		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
10646
10647 static int tg3_get_sset_count(struct net_device *dev, int sset)
10648 {
10649         switch (sset) {
10650         case ETH_SS_TEST:
10651                 return TG3_NUM_TEST;
10652         case ETH_SS_STATS:
10653                 return TG3_NUM_STATS;
10654         default:
10655                 return -EOPNOTSUPP;
10656         }
10657 }
10658
10659 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
10660                          u32 *rules __always_unused)
10661 {
10662         struct tg3 *tp = netdev_priv(dev);
10663
10664         if (!tg3_flag(tp, SUPPORT_MSIX))
10665                 return -EOPNOTSUPP;
10666
10667         switch (info->cmd) {
10668         case ETHTOOL_GRXRINGS:
10669                 if (netif_running(tp->dev))
10670                         info->data = tp->irq_cnt;
10671                 else {
10672                         info->data = num_online_cpus();
10673                         if (info->data > TG3_IRQ_MAX_VECS_RSS)
10674                                 info->data = TG3_IRQ_MAX_VECS_RSS;
10675                 }
10676
10677                 /* The first interrupt vector only
10678                  * handles link interrupts.
10679                  */
10680                 info->data -= 1;
10681                 return 0;
10682
10683         default:
10684                 return -EOPNOTSUPP;
10685         }
10686 }
10687
10688 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
10689 {
10690         u32 size = 0;
10691         struct tg3 *tp = netdev_priv(dev);
10692
10693         if (tg3_flag(tp, SUPPORT_MSIX))
10694                 size = TG3_RSS_INDIR_TBL_SIZE;
10695
10696         return size;
10697 }
10698
10699 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
10700 {
10701         struct tg3 *tp = netdev_priv(dev);
10702         int i;
10703
10704         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10705                 indir[i] = tp->rss_ind_tbl[i];
10706
10707         return 0;
10708 }
10709
10710 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
10711 {
10712         struct tg3 *tp = netdev_priv(dev);
10713         size_t i;
10714
10715         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10716                 tp->rss_ind_tbl[i] = indir[i];
10717
10718         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
10719                 return 0;
10720
10721         /* It is legal to write the indirection
10722          * table while the device is running.
10723          */
10724         tg3_full_lock(tp, 0);
10725         tg3_rss_write_indir_tbl(tp);
10726         tg3_full_unlock(tp);
10727
10728         return 0;
10729 }
10730
10731 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10732 {
10733         switch (stringset) {
10734         case ETH_SS_STATS:
10735                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10736                 break;
10737         case ETH_SS_TEST:
10738                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10739                 break;
10740         default:
10741                 WARN_ON(1);     /* we need a WARN() */
10742                 break;
10743         }
10744 }
10745
10746 static int tg3_set_phys_id(struct net_device *dev,
10747                             enum ethtool_phys_id_state state)
10748 {
10749         struct tg3 *tp = netdev_priv(dev);
10750
10751         if (!netif_running(tp->dev))
10752                 return -EAGAIN;
10753
10754         switch (state) {
10755         case ETHTOOL_ID_ACTIVE:
10756                 return 1;       /* cycle on/off once per second */
10757
10758         case ETHTOOL_ID_ON:
10759                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10760                      LED_CTRL_1000MBPS_ON |
10761                      LED_CTRL_100MBPS_ON |
10762                      LED_CTRL_10MBPS_ON |
10763                      LED_CTRL_TRAFFIC_OVERRIDE |
10764                      LED_CTRL_TRAFFIC_BLINK |
10765                      LED_CTRL_TRAFFIC_LED);
10766                 break;
10767
10768         case ETHTOOL_ID_OFF:
10769                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10770                      LED_CTRL_TRAFFIC_OVERRIDE);
10771                 break;
10772
10773         case ETHTOOL_ID_INACTIVE:
10774                 tw32(MAC_LED_CTRL, tp->led_ctrl);
10775                 break;
10776         }
10777
10778         return 0;
10779 }
10780
10781 static void tg3_get_ethtool_stats(struct net_device *dev,
10782                                    struct ethtool_stats *estats, u64 *tmp_stats)
10783 {
10784         struct tg3 *tp = netdev_priv(dev);
10785
10786         tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
10787 }
10788
/* tg3_vpd_readblock() - read the device's VPD block out of NVRAM.
 *
 * Locates the (possibly extended) VPD area: on TG3_EEPROM_MAGIC images
 * the NVRAM directory is scanned for an EXTVPD entry; otherwise, or if
 * no entry is found, the fixed legacy offset/length is used.  The data
 * is then read either through the NVRAM interface (big-endian reads to
 * preserve the on-chip byte order) or via PCI VPD capability accesses.
 *
 * On success returns a kmalloc'd buffer (caller must kfree) and stores
 * its length in *vpdlen; returns NULL on any failure.
 */
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		/* Scan the NVRAM directory for an extended-VPD entry. */
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			/* Entry found: length is stored in words; the
			 * following word holds the data pointer.
			 */
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	if (!offset || !len) {
		/* Fall back to the fixed legacy VPD location. */
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		/* Read via the PCI VPD capability; retry a timed-out or
		 * interrupted read up to three times in total.
		 */
		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}
10864
10865 #define NVRAM_TEST_SIZE 0x100
10866 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
10867 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
10868 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
10869 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
10870 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
10871 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
10872 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10873 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10874
10875 static int tg3_test_nvram(struct tg3 *tp)
10876 {
10877         u32 csum, magic, len;
10878         __be32 *buf;
10879         int i, j, k, err = 0, size;
10880
10881         if (tg3_flag(tp, NO_NVRAM))
10882                 return 0;
10883
10884         if (tg3_nvram_read(tp, 0, &magic) != 0)
10885                 return -EIO;
10886
10887         if (magic == TG3_EEPROM_MAGIC)
10888                 size = NVRAM_TEST_SIZE;
10889         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10890                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10891                     TG3_EEPROM_SB_FORMAT_1) {
10892                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10893                         case TG3_EEPROM_SB_REVISION_0:
10894                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10895                                 break;
10896                         case TG3_EEPROM_SB_REVISION_2:
10897                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10898                                 break;
10899                         case TG3_EEPROM_SB_REVISION_3:
10900                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10901                                 break;
10902                         case TG3_EEPROM_SB_REVISION_4:
10903                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
10904                                 break;
10905                         case TG3_EEPROM_SB_REVISION_5:
10906                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
10907                                 break;
10908                         case TG3_EEPROM_SB_REVISION_6:
10909                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
10910                                 break;
10911                         default:
10912                                 return -EIO;
10913                         }
10914                 } else
10915                         return 0;
10916         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10917                 size = NVRAM_SELFBOOT_HW_SIZE;
10918         else
10919                 return -EIO;
10920
10921         buf = kmalloc(size, GFP_KERNEL);
10922         if (buf == NULL)
10923                 return -ENOMEM;
10924
10925         err = -EIO;
10926         for (i = 0, j = 0; i < size; i += 4, j++) {
10927                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10928                 if (err)
10929                         break;
10930         }
10931         if (i < size)
10932                 goto out;
10933
10934         /* Selfboot format */
10935         magic = be32_to_cpu(buf[0]);
10936         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10937             TG3_EEPROM_MAGIC_FW) {
10938                 u8 *buf8 = (u8 *) buf, csum8 = 0;
10939
10940                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10941                     TG3_EEPROM_SB_REVISION_2) {
10942                         /* For rev 2, the csum doesn't include the MBA. */
10943                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10944                                 csum8 += buf8[i];
10945                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10946                                 csum8 += buf8[i];
10947                 } else {
10948                         for (i = 0; i < size; i++)
10949                                 csum8 += buf8[i];
10950                 }
10951
10952                 if (csum8 == 0) {
10953                         err = 0;
10954                         goto out;
10955                 }
10956
10957                 err = -EIO;
10958                 goto out;
10959         }
10960
10961         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10962             TG3_EEPROM_MAGIC_HW) {
10963                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10964                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10965                 u8 *buf8 = (u8 *) buf;
10966
10967                 /* Separate the parity bits and the data bytes.  */
10968                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10969                         if ((i == 0) || (i == 8)) {
10970                                 int l;
10971                                 u8 msk;
10972
10973                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10974                                         parity[k++] = buf8[i] & msk;
10975                                 i++;
10976                         } else if (i == 16) {
10977                                 int l;
10978                                 u8 msk;
10979
10980                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10981                                         parity[k++] = buf8[i] & msk;
10982                                 i++;
10983
10984                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10985                                         parity[k++] = buf8[i] & msk;
10986                                 i++;
10987                         }
10988                         data[j++] = buf8[i];
10989                 }
10990
10991                 err = -EIO;
10992                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10993                         u8 hw8 = hweight8(data[i]);
10994
10995                         if ((hw8 & 0x1) && parity[i])
10996                                 goto out;
10997                         else if (!(hw8 & 0x1) && !parity[i])
10998                                 goto out;
10999                 }
11000                 err = 0;
11001                 goto out;
11002         }
11003
11004         err = -EIO;
11005
11006         /* Bootstrap checksum at offset 0x10 */
11007         csum = calc_crc((unsigned char *) buf, 0x10);
11008         if (csum != le32_to_cpu(buf[0x10/4]))
11009                 goto out;
11010
11011         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11012         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11013         if (csum != le32_to_cpu(buf[0xfc/4]))
11014                 goto out;
11015
11016         kfree(buf);
11017
11018         buf = tg3_vpd_readblock(tp, &len);
11019         if (!buf)
11020                 return -ENOMEM;
11021
11022         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11023         if (i > 0) {
11024                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11025                 if (j < 0)
11026                         goto out;
11027
11028                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11029                         goto out;
11030
11031                 i += PCI_VPD_LRDT_TAG_SIZE;
11032                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11033                                               PCI_VPD_RO_KEYWORD_CHKSUM);
11034                 if (j > 0) {
11035                         u8 csum8 = 0;
11036
11037                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
11038
11039                         for (i = 0; i <= j; i++)
11040                                 csum8 += ((u8 *)buf)[i];
11041
11042                         if (csum8)
11043                                 goto out;
11044                 }
11045         }
11046
11047         err = 0;
11048
11049 out:
11050         kfree(buf);
11051         return err;
11052 }
11053
11054 #define TG3_SERDES_TIMEOUT_SEC  2
11055 #define TG3_COPPER_TIMEOUT_SEC  6
11056
11057 static int tg3_test_link(struct tg3 *tp)
11058 {
11059         int i, max;
11060
11061         if (!netif_running(tp->dev))
11062                 return -ENODEV;
11063
11064         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11065                 max = TG3_SERDES_TIMEOUT_SEC;
11066         else
11067                 max = TG3_COPPER_TIMEOUT_SEC;
11068
11069         for (i = 0; i < max; i++) {
11070                 if (netif_carrier_ok(tp->dev))
11071                         return 0;
11072
11073                 if (msleep_interruptible(1000))
11074                         break;
11075         }
11076
11077         return -EIO;
11078 }
11079
11080 /* Only test the commonly used registers */
/* Walk a table of commonly used MAC, receive, host-coalescing, buffer
 * manager and mailbox registers, and for each one verify that:
 *   - writing zero leaves the read-only bits (read_mask) unchanged and
 *     clears all read/write bits (write_mask), and
 *   - writing all ones to both masks leaves the read-only bits
 *     unchanged and sets all read/write bits.
 * The original register value is saved first and restored afterwards
 * (including on the failure path).
 *
 * Table entries are filtered by TG3_FL_* flags so only registers that
 * exist (with the given masks) on the current ASIC family are tested.
 *
 * Returns 0 on success, -EIO on the first mismatch (logged with the
 * failing offset when hw messages are enabled).
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;	/* TG3_FL_* applicability flags below */
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;	/* bits expected to be read-only */
		u32 write_mask;	/* bits expected to be read/write */
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		/* Terminator: offset 0xffff ends the scan loop below. */
		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	/* Classify the ASIC once so the per-entry flag filters are cheap. */
	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries that do not apply to this ASIC family. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		/* Restore the original value before moving on. */
		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	/* Restore the failing register before reporting the error. */
	tw32(offset, save_val);
	return -EIO;
}
11300
11301 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11302 {
11303         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11304         int i;
11305         u32 j;
11306
11307         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11308                 for (j = 0; j < len; j += 4) {
11309                         u32 val;
11310
11311                         tg3_write_mem(tp, offset + j, test_pattern[i]);
11312                         tg3_read_mem(tp, offset + j, &val);
11313                         if (val != test_pattern[i])
11314                                 return -EIO;
11315                 }
11316         }
11317         return 0;
11318 }
11319
/* Run the pattern-based memory test (tg3_do_mem_test) over every
 * internal memory region of the chip.  Each ASIC family has its own
 * table of { offset, length } regions; a sentinel offset of 0xffffffff
 * terminates each table.
 *
 * Returns 0 if all regions pass, or the error from the first failing
 * region.
 */
static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;	/* start of region in NIC-local memory */
		u32 len;	/* region length in bytes */
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5717[] = {
		{ 0x00000200, 0x00008},
		{ 0x00010000, 0x0a000},
		{ 0x00020000, 0x13c00},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_57765[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x09800},
		{ 0x00010000, 0x0a000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	/* Select the memory map for this ASIC; ordered from newest to
	 * oldest family so the most specific table wins.
	 */
	if (tg3_flag(tp, 5717_PLUS))
		mem_tbl = mem_tbl_5717;
	else if (tg3_flag(tp, 57765_CLASS))
		mem_tbl = mem_tbl_57765;
	else if (tg3_flag(tp, 5755_PLUS))
		mem_tbl = mem_tbl_5755;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tg3_flag(tp, 5705_PLUS))
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	/* Stop at the first failing region. */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
		if (err)
			break;
	}

	return err;
}
11388
11389 #define TG3_TSO_MSS             500
11390
11391 #define TG3_TSO_IP_HDR_LEN      20
11392 #define TG3_TSO_TCP_HDR_LEN     20
11393 #define TG3_TSO_TCP_OPT_LEN     12
11394
/* Canned packet header used by the TSO loopback test: Ethernet type
 * field followed by an IPv4 header and a TCP header carrying a
 * 12-byte timestamp option (hence TG3_TSO_TCP_OPT_LEN).  The IP total
 * length and TCP checksum fields are patched at runtime by
 * tg3_run_loopback() before transmission.
 */
static const u8 tg3_tso_header[] = {
0x08, 0x00,				/* ethertype: IPv4 (0x0800) */
0x45, 0x00, 0x00, 0x00,			/* IP: ver/ihl, tos, tot_len (filled in later) */
0x00, 0x00, 0x40, 0x00,			/* IP: id, frag flags (DF) */
0x40, 0x06, 0x00, 0x00,			/* IP: ttl 64, proto TCP, csum 0 */
0x0a, 0x00, 0x00, 0x01,			/* IP: src 10.0.0.1 */
0x0a, 0x00, 0x00, 0x02,			/* IP: dst 10.0.0.2 */
0x0d, 0x00, 0xe0, 0x00,			/* TCP: src/dst ports */
0x00, 0x00, 0x01, 0x00,			/* TCP: sequence number */
0x00, 0x00, 0x02, 0x00,			/* TCP: ack number */
0x80, 0x10, 0x10, 0x00,			/* TCP: doff 8 (32-byte hdr), ACK, window */
0x14, 0x09, 0x00, 0x00,			/* TCP: checksum (zeroed for HW TSO), urg ptr */
0x01, 0x01, 0x08, 0x0a,			/* TCP opts: NOP, NOP, timestamp (kind 8, len 10) */
0x11, 0x11, 0x11, 0x11,			/* TCP opts: timestamp value */
0x11, 0x11, 0x11, 0x11,			/* TCP opts: timestamp echo reply */
};
11411
/* Transmit one test frame (of pktsz bytes) through the device while it
 * is in a loopback mode and verify the frame comes back intact on the
 * receive side.
 *
 * The frame is addressed to the device's own MAC and filled with a
 * ramp pattern; when @tso_loopback is true the canned tg3_tso_header
 * is inserted and the TSO flags/mss are set up per the HW_TSO_*
 * generation of the chip, so the hardware segments the buffer into
 * multiple packets (num_pkts).
 *
 * After kicking the TX mailbox, the function polls up to ~350 usec for
 * the TX consumer and RX producer indices to advance, then walks the
 * RX return ring checking error bits, length, ring type (std/jumbo),
 * the TCP checksum result for TSO, and finally the payload pattern
 * byte-for-byte.
 *
 * Returns 0 on success, -ENOMEM if the skb cannot be allocated, -EIO
 * on mapping/descriptor failure or any data mismatch.
 */
static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
{
	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
	u32 budget;
	struct sk_buff *skb;
	u8 *tx_data, *rx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;
	struct tg3_napi *tnapi, *rnapi;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	/* With RSS/TSS the first vector has no rings; use napi[1]. */
	tnapi = &tp->napi[0];
	rnapi = &tp->napi[0];
	if (tp->irq_cnt > 1) {
		if (tg3_flag(tp, ENABLE_RSS))
			rnapi = &tp->napi[1];
		if (tg3_flag(tp, ENABLE_TSS))
			tnapi = &tp->napi[1];
	}
	coal_now = tnapi->coal_now | rnapi->coal_now;

	err = -EIO;

	tx_len = pktsz;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	/* Destination MAC = our own address so the looped frame passes
	 * the RX filter; source MAC zeroed.
	 */
	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

	if (tso_loopback) {
		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];

		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
			      TG3_TSO_TCP_OPT_LEN;

		/* Splice in the canned IPv4+TCP header after the MACs. */
		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
		       sizeof(tg3_tso_header));
		mss = TG3_TSO_MSS;

		/* The hardware will segment the payload into this many
		 * MSS-sized packets.
		 */
		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);

		/* Set the total length field in the IP header */
		iph->tot_len = htons((u16)(mss + hdr_len));

		base_flags = (TXD_FLAG_CPU_PRE_DMA |
			      TXD_FLAG_CPU_POST_DMA);

		/* HW TSO engines compute the TCP checksum themselves and
		 * require the field zeroed; older chips need the csum
		 * offload flag instead.
		 */
		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			struct tcphdr *th;
			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
			th = (struct tcphdr *)&tx_data[val];
			th->check = 0;
		} else
			base_flags |= TXD_FLAG_TCPUDP_CSUM;

		/* Each HW TSO generation encodes the header length into
		 * the mss/flags descriptor fields differently.
		 */
		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
		} else {
			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
		}

		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
	} else {
		num_pkts = 1;
		data_off = ETH_HLEN;
	}

	/* Fill the payload with a ramp pattern we can verify on RX. */
	for (i = data_off; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	val = tnapi->tx_prod;
	tnapi->tx_buffers[val].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);

	/* Force a coalescing interval so the status block is current
	 * before sampling the starting RX producer index.
	 */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       rnapi->coal_now);

	udelay(10);

	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

	budget = tg3_tx_avail(tnapi);
	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
			    base_flags | TXD_FLAG_END, mss, 0)) {
		tnapi->tx_buffers[val].skb = NULL;
		dev_kfree_skb(skb);
		return -EIO;
	}

	tnapi->tx_prod++;

	/* Ring the TX doorbell; the read-back flushes the posted write. */
	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
	tr32_mailbox(tnapi->prodmbox);

	udelay(10);

	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 35; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       coal_now);

		udelay(10);

		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
		rx_idx = rnapi->hw_status->idx[0].rx_producer;
		if ((tx_idx == tnapi->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
	dev_kfree_skb(skb);

	/* Fail if TX did not complete or RX did not see every packet. */
	if (tx_idx != tnapi->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	/* Walk the RX return ring entries produced by this test. */
	val = data_off;
	while (rx_idx != rx_start_idx) {
		desc = &rnapi->rx_rcb[rx_start_idx++];
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

		/* Any error other than the benign odd-nibble MII one fails. */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
			goto out;

		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
			 - ETH_FCS_LEN;

		if (!tso_loopback) {
			if (rx_len != tx_len)
				goto out;

			/* Frame must land on the expected producer ring. */
			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
				if (opaque_key != RXD_OPAQUE_RING_STD)
					goto out;
			} else {
				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
					goto out;
			}
		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
			/* TSO frames must carry a valid TCP checksum. */
			goto out;
		}

		if (opaque_key == RXD_OPAQUE_RING_STD) {
			rx_data = tpr->rx_std_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
					     mapping);
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
					     mapping);
		} else
			goto out;

		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
					    PCI_DMA_FROMDEVICE);

		/* Verify the ramp pattern; val carries the position
		 * across segmented (TSO) packets.
		 */
		rx_data += TG3_RX_OFFSET(tp);
		for (i = data_off; i < rx_len; i++, val++) {
			if (*(rx_data + i) != (u8) (val & 0xff))
				goto out;
		}
	}

	err = 0;

	/* tg3_free_rings will unmap and free the rx_data */
out:
	return err;
}
11612
11613 #define TG3_STD_LOOPBACK_FAILED         1
11614 #define TG3_JMB_LOOPBACK_FAILED         2
11615 #define TG3_TSO_LOOPBACK_FAILED         4
11616 #define TG3_LOOPBACK_FAILED \
11617         (TG3_STD_LOOPBACK_FAILED | \
11618          TG3_JMB_LOOPBACK_FAILED | \
11619          TG3_TSO_LOOPBACK_FAILED)
11620
/* Run the full set of loopback tests: MAC loopback (where supported),
 * internal PHY loopback, and optionally external loopback.  Results
 * are OR-ed into data[0] (MAC), data[1] (PHY) and data[2] (external)
 * as TG3_*_LOOPBACK_FAILED bits; the caller is expected to have
 * zeroed the array beforehand (tg3_self_test does).
 *
 * EEE is disabled for the duration of the test (restored on exit)
 * since it interferes with loopback traffic.
 *
 * Returns 0 if all selected tests pass, -EIO otherwise.
 */
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
	int err = -EIO;
	u32 eee_cap;

	/* Temporarily mask off the EEE capability flag. */
	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	if (!netif_running(tp->dev)) {
		data[0] = TG3_LOOPBACK_FAILED;
		data[1] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[2] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	err = tg3_reset_hw(tp, 1);
	if (err) {
		data[0] = TG3_LOOPBACK_FAILED;
		data[1] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[2] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* HW errata - mac loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by
	 * errata.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT)) {
		tg3_mac_loopback(tp, true);

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[0] |= TG3_STD_LOOPBACK_FAILED;

		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
			data[0] |= TG3_JMB_LOOPBACK_FAILED;

		tg3_mac_loopback(tp, false);
	}

	/* PHY loopback: only for non-serdes PHYs driven directly
	 * (not through phylib).
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		int i;

		tg3_phy_lpbk_set(tp, 0, false);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[1] |= TG3_STD_LOOPBACK_FAILED;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
			data[1] |= TG3_TSO_LOOPBACK_FAILED;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
			data[1] |= TG3_JMB_LOOPBACK_FAILED;

		if (do_extlpbk) {
			tg3_phy_lpbk_set(tp, 0, true);

			/* All link indications report up, but the hardware
			 * isn't really ready for about 20 msec.  Double it
			 * to be sure.
			 */
			mdelay(40);

			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
				data[2] |= TG3_STD_LOOPBACK_FAILED;
			if (tg3_flag(tp, TSO_CAPABLE) &&
			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
				data[2] |= TG3_TSO_LOOPBACK_FAILED;
			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
			    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
				data[2] |= TG3_JMB_LOOPBACK_FAILED;
		}

		/* Re-enable gphy autopowerdown. */
		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
			tg3_phy_toggle_apd(tp, true);
	}

	/* Any failure bit set in any category means the test failed. */
	err = (data[0] | data[1] | data[2]) ? -EIO : 0;

done:
	/* Restore the EEE capability flag masked off above. */
	tp->phy_flags |= eee_cap;

	return err;
}
11727
/* ethtool_ops.self_test entry point.
 *
 * Per-test results are written into data[] (non-zero = failed) and an
 * overall failure is recorded in etest->flags.  Slots used here:
 *   data[0] NVRAM, data[1] link, data[2] registers, data[3] memory,
 *   data[4..] loopback (filled by tg3_test_loopback), data[7] interrupt.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	/* The chip must be powered up to run any test; if we cannot
	 * wake it, mark every test failed and bail out.
	 */
	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
	    tg3_power_up(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
		return;
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	/* Skip the link test when an external-loopback run was
	 * requested; that mode is exercised via tg3_test_loopback().
	 */
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Quiesce the chip and its on-board CPUs before poking
		 * at registers and internal memory.
		 */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		if (tg3_test_loopback(tp, &data[4], doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		/* The interrupt test must run unlocked so the interrupt
		 * can actually be delivered and handled.
		 */
		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[7] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Reset and, if the interface is up, restart the chip
		 * so normal operation resumes after the offline tests.
		 */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down(tp);

}
11815
11816 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11817 {
11818         struct mii_ioctl_data *data = if_mii(ifr);
11819         struct tg3 *tp = netdev_priv(dev);
11820         int err;
11821
11822         if (tg3_flag(tp, USE_PHYLIB)) {
11823                 struct phy_device *phydev;
11824                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11825                         return -EAGAIN;
11826                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11827                 return phy_mii_ioctl(phydev, ifr, cmd);
11828         }
11829
11830         switch (cmd) {
11831         case SIOCGMIIPHY:
11832                 data->phy_id = tp->phy_addr;
11833
11834                 /* fallthru */
11835         case SIOCGMIIREG: {
11836                 u32 mii_regval;
11837
11838                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11839                         break;                  /* We have no PHY */
11840
11841                 if (!netif_running(dev))
11842                         return -EAGAIN;
11843
11844                 spin_lock_bh(&tp->lock);
11845                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11846                 spin_unlock_bh(&tp->lock);
11847
11848                 data->val_out = mii_regval;
11849
11850                 return err;
11851         }
11852
11853         case SIOCSMIIREG:
11854                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11855                         break;                  /* We have no PHY */
11856
11857                 if (!netif_running(dev))
11858                         return -EAGAIN;
11859
11860                 spin_lock_bh(&tp->lock);
11861                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11862                 spin_unlock_bh(&tp->lock);
11863
11864                 return err;
11865
11866         default:
11867                 /* do nothing */
11868                 break;
11869         }
11870         return -EOPNOTSUPP;
11871 }
11872
11873 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11874 {
11875         struct tg3 *tp = netdev_priv(dev);
11876
11877         memcpy(ec, &tp->coal, sizeof(*ec));
11878         return 0;
11879 }
11880
11881 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11882 {
11883         struct tg3 *tp = netdev_priv(dev);
11884         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11885         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11886
11887         if (!tg3_flag(tp, 5705_PLUS)) {
11888                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11889                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11890                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11891                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11892         }
11893
11894         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11895             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11896             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11897             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11898             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11899             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11900             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11901             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11902             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11903             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11904                 return -EINVAL;
11905
11906         /* No rx interrupts will be generated if both are zero */
11907         if ((ec->rx_coalesce_usecs == 0) &&
11908             (ec->rx_max_coalesced_frames == 0))
11909                 return -EINVAL;
11910
11911         /* No tx interrupts will be generated if both are zero */
11912         if ((ec->tx_coalesce_usecs == 0) &&
11913             (ec->tx_max_coalesced_frames == 0))
11914                 return -EINVAL;
11915
11916         /* Only copy relevant parameters, ignore all others. */
11917         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11918         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11919         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11920         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11921         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11922         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11923         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11924         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11925         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11926
11927         if (netif_running(dev)) {
11928                 tg3_full_lock(tp, 0);
11929                 __tg3_set_coalesce(tp, &tp->coal);
11930                 tg3_full_unlock(tp);
11931         }
11932         return 0;
11933 }
11934
/* ethtool operations exported by this driver; handlers are defined above. */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size	= tg3_get_rxfh_indir_size,
	.get_rxfh_indir		= tg3_get_rxfh_indir,
	.set_rxfh_indir		= tg3_set_rxfh_indir,
};
11966
/* net_device_ops.ndo_set_rx_mode: refresh the RX filter configuration
 * under the full lock.  A no-op while the interface is down.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
11978
11979 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
11980                                int new_mtu)
11981 {
11982         dev->mtu = new_mtu;
11983
11984         if (new_mtu > ETH_DATA_LEN) {
11985                 if (tg3_flag(tp, 5780_CLASS)) {
11986                         netdev_update_features(dev);
11987                         tg3_flag_clear(tp, TSO_CAPABLE);
11988                 } else {
11989                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
11990                 }
11991         } else {
11992                 if (tg3_flag(tp, 5780_CLASS)) {
11993                         tg3_flag_set(tp, TSO_CAPABLE);
11994                         netdev_update_features(dev);
11995                 }
11996                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
11997         }
11998 }
11999
/* net_device_ops.ndo_change_mtu: validate and apply a new MTU.
 *
 * If the interface is down only the bookkeeping in tg3_set_mtu() is
 * done; the hardware picks the value up at the next open.  Otherwise
 * the chip is halted, reconfigured, and restarted.  Returns 0 or a
 * negative errno (-EINVAL for out-of-range, or tg3_restart_hw()'s
 * error).
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	/* Quiesce PHY and traffic before reconfiguring the chip. */
	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	err = tg3_restart_hw(tp, 0);

	/* Only resume traffic / PHY handling if the restart succeeded. */
	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
12038
/* net_device callbacks exported by this driver. */
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
12056
12057 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12058 {
12059         u32 cursize, val, magic;
12060
12061         tp->nvram_size = EEPROM_CHIP_SIZE;
12062
12063         if (tg3_nvram_read(tp, 0, &magic) != 0)
12064                 return;
12065
12066         if ((magic != TG3_EEPROM_MAGIC) &&
12067             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12068             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12069                 return;
12070
12071         /*
12072          * Size the chip by reading offsets at increasing powers of two.
12073          * When we encounter our validation signature, we know the addressing
12074          * has wrapped around, and thus have our chip size.
12075          */
12076         cursize = 0x10;
12077
12078         while (cursize < tp->nvram_size) {
12079                 if (tg3_nvram_read(tp, cursize, &val) != 0)
12080                         return;
12081
12082                 if (val == magic)
12083                         break;
12084
12085                 cursize <<= 1;
12086         }
12087
12088         tp->nvram_size = cursize;
12089 }
12090
12091 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12092 {
12093         u32 val;
12094
12095         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12096                 return;
12097
12098         /* Selfboot format */
12099         if (val != TG3_EEPROM_MAGIC) {
12100                 tg3_get_eeprom_size(tp);
12101                 return;
12102         }
12103
12104         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12105                 if (val != 0) {
12106                         /* This is confusing.  We want to operate on the
12107                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
12108                          * call will read from NVRAM and byteswap the data
12109                          * according to the byteswapping settings for all
12110                          * other register accesses.  This ensures the data we
12111                          * want will always reside in the lower 16-bits.
12112                          * However, the data in NVRAM is in LE format, which
12113                          * means the data from the NVRAM read will always be
12114                          * opposite the endianness of the CPU.  The 16-bit
12115                          * byteswap then brings the data to CPU endianness.
12116                          */
12117                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12118                         return;
12119                 }
12120         }
12121         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12122 }
12123
/* Legacy NVRAM probing: decode NVRAM_CFG1 to set the JEDEC vendor id,
 * page size, and NVRAM access flags for 5750/5780-class chips; older
 * chips get a fixed buffered-Atmel configuration.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		/* No flash interface: clear the compat-bypass bit. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		/* Vendor-specific part parameters. */
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		/* Pre-5750 chips: fixed buffered Atmel configuration. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
12174
12175 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12176 {
12177         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12178         case FLASH_5752PAGE_SIZE_256:
12179                 tp->nvram_pagesize = 256;
12180                 break;
12181         case FLASH_5752PAGE_SIZE_512:
12182                 tp->nvram_pagesize = 512;
12183                 break;
12184         case FLASH_5752PAGE_SIZE_1K:
12185                 tp->nvram_pagesize = 1024;
12186                 break;
12187         case FLASH_5752PAGE_SIZE_2K:
12188                 tp->nvram_pagesize = 2048;
12189                 break;
12190         case FLASH_5752PAGE_SIZE_4K:
12191                 tp->nvram_pagesize = 4096;
12192                 break;
12193         case FLASH_5752PAGE_SIZE_264:
12194                 tp->nvram_pagesize = 264;
12195                 break;
12196         case FLASH_5752PAGE_SIZE_528:
12197                 tp->nvram_pagesize = 528;
12198                 break;
12199         }
12200 }
12201
/* 5752 NVRAM probing: decode NVRAM_CFG1 into vendor id, access flags,
 * and page size.
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* Clear compat-bypass for EEPROM-style access. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
12242
/* 5755 NVRAM probing: decode NVRAM_CFG1 into vendor id, page size, and
 * total size; a TPM-protected part advertises a reduced usable size.
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		/* Size depends on the exact part; protection caps it. */
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		/* ST parts: protected size is half the full capacity. */
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
12298
/* 5787 NVRAM probing: decode NVRAM_CFG1 into vendor id, page size, and
 * access flags.
 */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* Clear compat-bypass for EEPROM-style access. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
12336
/* 5761 NVRAM probing: decode NVRAM_CFG1 into vendor id, page size, and
 * access flags, then determine the total size — from the hardware
 * lockout register if TPM-protected, otherwise from the part id.
 */
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		/* Protected: usable size comes from the lockout register. */
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		/* Unprotected: size follows directly from the part id. */
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
12411
12412 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12413 {
12414         tp->nvram_jedecnum = JEDEC_ATMEL;
12415         tg3_flag_set(tp, NVRAM_BUFFERED);
12416         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12417 }
12418
/* 57780 NVRAM probing: decode NVRAM_CFG1 into vendor id, size, page
 * size, and access flags.  Unknown vendor codes mark the device as
 * having no usable NVRAM.
 */
static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* EEPROM-style access; page-size probing not needed. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Total size depends on the exact Atmel part. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Total size depends on the exact ST part. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		/* Unrecognized vendor code: treat as no NVRAM. */
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Address translation only applies to 264/528-byte page parts. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
12490
12491
12492 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12493 {
12494         u32 nvcfg1;
12495
12496         nvcfg1 = tr32(NVRAM_CFG1);
12497
12498         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12499         case FLASH_5717VENDOR_ATMEL_EEPROM:
12500         case FLASH_5717VENDOR_MICRO_EEPROM:
12501                 tp->nvram_jedecnum = JEDEC_ATMEL;
12502                 tg3_flag_set(tp, NVRAM_BUFFERED);
12503                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12504
12505                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12506                 tw32(NVRAM_CFG1, nvcfg1);
12507                 return;
12508         case FLASH_5717VENDOR_ATMEL_MDB011D:
12509         case FLASH_5717VENDOR_ATMEL_ADB011B:
12510         case FLASH_5717VENDOR_ATMEL_ADB011D:
12511         case FLASH_5717VENDOR_ATMEL_MDB021D:
12512         case FLASH_5717VENDOR_ATMEL_ADB021B:
12513         case FLASH_5717VENDOR_ATMEL_ADB021D:
12514         case FLASH_5717VENDOR_ATMEL_45USPT:
12515                 tp->nvram_jedecnum = JEDEC_ATMEL;
12516                 tg3_flag_set(tp, NVRAM_BUFFERED);
12517                 tg3_flag_set(tp, FLASH);
12518
12519                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12520                 case FLASH_5717VENDOR_ATMEL_MDB021D:
12521                         /* Detect size with tg3_nvram_get_size() */
12522                         break;
12523                 case FLASH_5717VENDOR_ATMEL_ADB021B:
12524                 case FLASH_5717VENDOR_ATMEL_ADB021D:
12525                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12526                         break;
12527                 default:
12528                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12529                         break;
12530                 }
12531                 break;
12532         case FLASH_5717VENDOR_ST_M_M25PE10:
12533         case FLASH_5717VENDOR_ST_A_M25PE10:
12534         case FLASH_5717VENDOR_ST_M_M45PE10:
12535         case FLASH_5717VENDOR_ST_A_M45PE10:
12536         case FLASH_5717VENDOR_ST_M_M25PE20:
12537         case FLASH_5717VENDOR_ST_A_M25PE20:
12538         case FLASH_5717VENDOR_ST_M_M45PE20:
12539         case FLASH_5717VENDOR_ST_A_M45PE20:
12540         case FLASH_5717VENDOR_ST_25USPT:
12541         case FLASH_5717VENDOR_ST_45USPT:
12542                 tp->nvram_jedecnum = JEDEC_ST;
12543                 tg3_flag_set(tp, NVRAM_BUFFERED);
12544                 tg3_flag_set(tp, FLASH);
12545
12546                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12547                 case FLASH_5717VENDOR_ST_M_M25PE20:
12548                 case FLASH_5717VENDOR_ST_M_M45PE20:
12549                         /* Detect size with tg3_nvram_get_size() */
12550                         break;
12551                 case FLASH_5717VENDOR_ST_A_M25PE20:
12552                 case FLASH_5717VENDOR_ST_A_M45PE20:
12553                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12554                         break;
12555                 default:
12556                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12557                         break;
12558                 }
12559                 break;
12560         default:
12561                 tg3_flag_set(tp, NO_NVRAM);
12562                 return;
12563         }
12564
12565         tg3_nvram_get_pagesize(tp, nvcfg1);
12566         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12567                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12568 }
12569
12570 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12571 {
12572         u32 nvcfg1, nvmpinstrp;
12573
12574         nvcfg1 = tr32(NVRAM_CFG1);
12575         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12576
12577         switch (nvmpinstrp) {
12578         case FLASH_5720_EEPROM_HD:
12579         case FLASH_5720_EEPROM_LD:
12580                 tp->nvram_jedecnum = JEDEC_ATMEL;
12581                 tg3_flag_set(tp, NVRAM_BUFFERED);
12582
12583                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12584                 tw32(NVRAM_CFG1, nvcfg1);
12585                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12586                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12587                 else
12588                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12589                 return;
12590         case FLASH_5720VENDOR_M_ATMEL_DB011D:
12591         case FLASH_5720VENDOR_A_ATMEL_DB011B:
12592         case FLASH_5720VENDOR_A_ATMEL_DB011D:
12593         case FLASH_5720VENDOR_M_ATMEL_DB021D:
12594         case FLASH_5720VENDOR_A_ATMEL_DB021B:
12595         case FLASH_5720VENDOR_A_ATMEL_DB021D:
12596         case FLASH_5720VENDOR_M_ATMEL_DB041D:
12597         case FLASH_5720VENDOR_A_ATMEL_DB041B:
12598         case FLASH_5720VENDOR_A_ATMEL_DB041D:
12599         case FLASH_5720VENDOR_M_ATMEL_DB081D:
12600         case FLASH_5720VENDOR_A_ATMEL_DB081D:
12601         case FLASH_5720VENDOR_ATMEL_45USPT:
12602                 tp->nvram_jedecnum = JEDEC_ATMEL;
12603                 tg3_flag_set(tp, NVRAM_BUFFERED);
12604                 tg3_flag_set(tp, FLASH);
12605
12606                 switch (nvmpinstrp) {
12607                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12608                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12609                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12610                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12611                         break;
12612                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12613                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12614                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12615                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12616                         break;
12617                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12618                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12619                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12620                         break;
12621                 default:
12622                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12623                         break;
12624                 }
12625                 break;
12626         case FLASH_5720VENDOR_M_ST_M25PE10:
12627         case FLASH_5720VENDOR_M_ST_M45PE10:
12628         case FLASH_5720VENDOR_A_ST_M25PE10:
12629         case FLASH_5720VENDOR_A_ST_M45PE10:
12630         case FLASH_5720VENDOR_M_ST_M25PE20:
12631         case FLASH_5720VENDOR_M_ST_M45PE20:
12632         case FLASH_5720VENDOR_A_ST_M25PE20:
12633         case FLASH_5720VENDOR_A_ST_M45PE20:
12634         case FLASH_5720VENDOR_M_ST_M25PE40:
12635         case FLASH_5720VENDOR_M_ST_M45PE40:
12636         case FLASH_5720VENDOR_A_ST_M25PE40:
12637         case FLASH_5720VENDOR_A_ST_M45PE40:
12638         case FLASH_5720VENDOR_M_ST_M25PE80:
12639         case FLASH_5720VENDOR_M_ST_M45PE80:
12640         case FLASH_5720VENDOR_A_ST_M25PE80:
12641         case FLASH_5720VENDOR_A_ST_M45PE80:
12642         case FLASH_5720VENDOR_ST_25USPT:
12643         case FLASH_5720VENDOR_ST_45USPT:
12644                 tp->nvram_jedecnum = JEDEC_ST;
12645                 tg3_flag_set(tp, NVRAM_BUFFERED);
12646                 tg3_flag_set(tp, FLASH);
12647
12648                 switch (nvmpinstrp) {
12649                 case FLASH_5720VENDOR_M_ST_M25PE20:
12650                 case FLASH_5720VENDOR_M_ST_M45PE20:
12651                 case FLASH_5720VENDOR_A_ST_M25PE20:
12652                 case FLASH_5720VENDOR_A_ST_M45PE20:
12653                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12654                         break;
12655                 case FLASH_5720VENDOR_M_ST_M25PE40:
12656                 case FLASH_5720VENDOR_M_ST_M45PE40:
12657                 case FLASH_5720VENDOR_A_ST_M25PE40:
12658                 case FLASH_5720VENDOR_A_ST_M45PE40:
12659                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12660                         break;
12661                 case FLASH_5720VENDOR_M_ST_M25PE80:
12662                 case FLASH_5720VENDOR_M_ST_M45PE80:
12663                 case FLASH_5720VENDOR_A_ST_M25PE80:
12664                 case FLASH_5720VENDOR_A_ST_M45PE80:
12665                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12666                         break;
12667                 default:
12668                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12669                         break;
12670                 }
12671                 break;
12672         default:
12673                 tg3_flag_set(tp, NO_NVRAM);
12674                 return;
12675         }
12676
12677         tg3_nvram_get_pagesize(tp, nvcfg1);
12678         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12679                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12680 }
12681
12682 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12683 static void __devinit tg3_nvram_init(struct tg3 *tp)
12684 {
12685         tw32_f(GRC_EEPROM_ADDR,
12686              (EEPROM_ADDR_FSM_RESET |
12687               (EEPROM_DEFAULT_CLOCK_PERIOD <<
12688                EEPROM_ADDR_CLKPERD_SHIFT)));
12689
12690         msleep(1);
12691
12692         /* Enable seeprom accesses. */
12693         tw32_f(GRC_LOCAL_CTRL,
12694              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12695         udelay(100);
12696
12697         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12698             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12699                 tg3_flag_set(tp, NVRAM);
12700
12701                 if (tg3_nvram_lock(tp)) {
12702                         netdev_warn(tp->dev,
12703                                     "Cannot get nvram lock, %s failed\n",
12704                                     __func__);
12705                         return;
12706                 }
12707                 tg3_enable_nvram_access(tp);
12708
12709                 tp->nvram_size = 0;
12710
12711                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12712                         tg3_get_5752_nvram_info(tp);
12713                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12714                         tg3_get_5755_nvram_info(tp);
12715                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12716                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12717                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12718                         tg3_get_5787_nvram_info(tp);
12719                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12720                         tg3_get_5761_nvram_info(tp);
12721                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12722                         tg3_get_5906_nvram_info(tp);
12723                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12724                          tg3_flag(tp, 57765_CLASS))
12725                         tg3_get_57780_nvram_info(tp);
12726                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12727                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12728                         tg3_get_5717_nvram_info(tp);
12729                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12730                         tg3_get_5720_nvram_info(tp);
12731                 else
12732                         tg3_get_nvram_info(tp);
12733
12734                 if (tp->nvram_size == 0)
12735                         tg3_get_nvram_size(tp);
12736
12737                 tg3_disable_nvram_access(tp);
12738                 tg3_nvram_unlock(tp);
12739
12740         } else {
12741                 tg3_flag_clear(tp, NVRAM);
12742                 tg3_flag_clear(tp, NVRAM_BUFFERED);
12743
12744                 tg3_get_eeprom_size(tp);
12745         }
12746 }
12747
12748 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12749                                     u32 offset, u32 len, u8 *buf)
12750 {
12751         int i, j, rc = 0;
12752         u32 val;
12753
12754         for (i = 0; i < len; i += 4) {
12755                 u32 addr;
12756                 __be32 data;
12757
12758                 addr = offset + i;
12759
12760                 memcpy(&data, buf + i, 4);
12761
12762                 /*
12763                  * The SEEPROM interface expects the data to always be opposite
12764                  * the native endian format.  We accomplish this by reversing
12765                  * all the operations that would have been performed on the
12766                  * data from a call to tg3_nvram_read_be32().
12767                  */
12768                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12769
12770                 val = tr32(GRC_EEPROM_ADDR);
12771                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12772
12773                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12774                         EEPROM_ADDR_READ);
12775                 tw32(GRC_EEPROM_ADDR, val |
12776                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
12777                         (addr & EEPROM_ADDR_ADDR_MASK) |
12778                         EEPROM_ADDR_START |
12779                         EEPROM_ADDR_WRITE);
12780
12781                 for (j = 0; j < 1000; j++) {
12782                         val = tr32(GRC_EEPROM_ADDR);
12783
12784                         if (val & EEPROM_ADDR_COMPLETE)
12785                                 break;
12786                         msleep(1);
12787                 }
12788                 if (!(val & EEPROM_ADDR_COMPLETE)) {
12789                         rc = -EBUSY;
12790                         break;
12791                 }
12792         }
12793
12794         return rc;
12795 }
12796
12797 /* offset and length are dword aligned */
12798 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12799                 u8 *buf)
12800 {
12801         int ret = 0;
12802         u32 pagesize = tp->nvram_pagesize;
12803         u32 pagemask = pagesize - 1;
12804         u32 nvram_cmd;
12805         u8 *tmp;
12806
12807         tmp = kmalloc(pagesize, GFP_KERNEL);
12808         if (tmp == NULL)
12809                 return -ENOMEM;
12810
12811         while (len) {
12812                 int j;
12813                 u32 phy_addr, page_off, size;
12814
12815                 phy_addr = offset & ~pagemask;
12816
12817                 for (j = 0; j < pagesize; j += 4) {
12818                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
12819                                                   (__be32 *) (tmp + j));
12820                         if (ret)
12821                                 break;
12822                 }
12823                 if (ret)
12824                         break;
12825
12826                 page_off = offset & pagemask;
12827                 size = pagesize;
12828                 if (len < size)
12829                         size = len;
12830
12831                 len -= size;
12832
12833                 memcpy(tmp + page_off, buf, size);
12834
12835                 offset = offset + (pagesize - page_off);
12836
12837                 tg3_enable_nvram_access(tp);
12838
12839                 /*
12840                  * Before we can erase the flash page, we need
12841                  * to issue a special "write enable" command.
12842                  */
12843                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12844
12845                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12846                         break;
12847
12848                 /* Erase the target page */
12849                 tw32(NVRAM_ADDR, phy_addr);
12850
12851                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12852                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12853
12854                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12855                         break;
12856
12857                 /* Issue another write enable to start the write. */
12858                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12859
12860                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12861                         break;
12862
12863                 for (j = 0; j < pagesize; j += 4) {
12864                         __be32 data;
12865
12866                         data = *((__be32 *) (tmp + j));
12867
12868                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
12869
12870                         tw32(NVRAM_ADDR, phy_addr + j);
12871
12872                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12873                                 NVRAM_CMD_WR;
12874
12875                         if (j == 0)
12876                                 nvram_cmd |= NVRAM_CMD_FIRST;
12877                         else if (j == (pagesize - 4))
12878                                 nvram_cmd |= NVRAM_CMD_LAST;
12879
12880                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12881                                 break;
12882                 }
12883                 if (ret)
12884                         break;
12885         }
12886
12887         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12888         tg3_nvram_exec_cmd(tp, nvram_cmd);
12889
12890         kfree(tmp);
12891
12892         return ret;
12893 }
12894
12895 /* offset and length are dword aligned */
12896 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12897                 u8 *buf)
12898 {
12899         int i, ret = 0;
12900
12901         for (i = 0; i < len; i += 4, offset += 4) {
12902                 u32 page_off, phy_addr, nvram_cmd;
12903                 __be32 data;
12904
12905                 memcpy(&data, buf + i, 4);
12906                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12907
12908                 page_off = offset % tp->nvram_pagesize;
12909
12910                 phy_addr = tg3_nvram_phys_addr(tp, offset);
12911
12912                 tw32(NVRAM_ADDR, phy_addr);
12913
12914                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12915
12916                 if (page_off == 0 || i == 0)
12917                         nvram_cmd |= NVRAM_CMD_FIRST;
12918                 if (page_off == (tp->nvram_pagesize - 4))
12919                         nvram_cmd |= NVRAM_CMD_LAST;
12920
12921                 if (i == (len - 4))
12922                         nvram_cmd |= NVRAM_CMD_LAST;
12923
12924                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12925                     !tg3_flag(tp, 5755_PLUS) &&
12926                     (tp->nvram_jedecnum == JEDEC_ST) &&
12927                     (nvram_cmd & NVRAM_CMD_FIRST)) {
12928
12929                         if ((ret = tg3_nvram_exec_cmd(tp,
12930                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12931                                 NVRAM_CMD_DONE)))
12932
12933                                 break;
12934                 }
12935                 if (!tg3_flag(tp, FLASH)) {
12936                         /* We always do complete word writes to eeprom. */
12937                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12938                 }
12939
12940                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12941                         break;
12942         }
12943         return ret;
12944 }
12945
12946 /* offset and length are dword aligned */
12947 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12948 {
12949         int ret;
12950
12951         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12952                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12953                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
12954                 udelay(40);
12955         }
12956
12957         if (!tg3_flag(tp, NVRAM)) {
12958                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12959         } else {
12960                 u32 grc_mode;
12961
12962                 ret = tg3_nvram_lock(tp);
12963                 if (ret)
12964                         return ret;
12965
12966                 tg3_enable_nvram_access(tp);
12967                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12968                         tw32(NVRAM_WRITE1, 0x406);
12969
12970                 grc_mode = tr32(GRC_MODE);
12971                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12972
12973                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12974                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
12975                                 buf);
12976                 } else {
12977                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12978                                 buf);
12979                 }
12980
12981                 grc_mode = tr32(GRC_MODE);
12982                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12983
12984                 tg3_disable_nvram_access(tp);
12985                 tg3_nvram_unlock(tp);
12986         }
12987
12988         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12989                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12990                 udelay(40);
12991         }
12992
12993         return ret;
12994 }
12995
/* One entry of the board -> PHY override table below: matches a PCI
 * subsystem vendor/device pair to the PHY ID fitted on that board
 * (phy_id == 0 means no override). */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;	/* PCI subsystem IDs */
	u32 phy_id;				/* TG3_PHY_ID_* or 0 */
};
13000
/* Board-specific PHY overrides, searched by tg3_lookup_by_subsys().
 * A phy_id of 0 means the board has no fixed PHY ID and is probed
 * normally. */
static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
13064
13065 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13066 {
13067         int i;
13068
13069         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13070                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13071                      tp->pdev->subsystem_vendor) &&
13072                     (subsys_id_to_phy_id[i].subsys_devid ==
13073                      tp->pdev->subsystem_device))
13074                         return &subsys_id_to_phy_id[i];
13075         }
13076         return NULL;
13077 }
13078
13079 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13080 {
13081         u32 val;
13082
13083         tp->phy_id = TG3_PHY_ID_INVALID;
13084         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13085
13086         /* Assume an onboard device and WOL capable by default.  */
13087         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13088         tg3_flag_set(tp, WOL_CAP);
13089
13090         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13091                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13092                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13093                         tg3_flag_set(tp, IS_NIC);
13094                 }
13095                 val = tr32(VCPU_CFGSHDW);
13096                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
13097                         tg3_flag_set(tp, ASPM_WORKAROUND);
13098                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13099                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13100                         tg3_flag_set(tp, WOL_ENABLE);
13101                         device_set_wakeup_enable(&tp->pdev->dev, true);
13102                 }
13103                 goto done;
13104         }
13105
13106         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13107         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13108                 u32 nic_cfg, led_cfg;
13109                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13110                 int eeprom_phy_serdes = 0;
13111
13112                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13113                 tp->nic_sram_data_cfg = nic_cfg;
13114
13115                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13116                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13117                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13118                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13119                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13120                     (ver > 0) && (ver < 0x100))
13121                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13122
13123                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13124                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13125
13126                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13127                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13128                         eeprom_phy_serdes = 1;
13129
13130                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13131                 if (nic_phy_id != 0) {
13132                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13133                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13134
13135                         eeprom_phy_id  = (id1 >> 16) << 10;
13136                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
13137                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
13138                 } else
13139                         eeprom_phy_id = 0;
13140
13141                 tp->phy_id = eeprom_phy_id;
13142                 if (eeprom_phy_serdes) {
13143                         if (!tg3_flag(tp, 5705_PLUS))
13144                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13145                         else
13146                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13147                 }
13148
13149                 if (tg3_flag(tp, 5750_PLUS))
13150                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13151                                     SHASTA_EXT_LED_MODE_MASK);
13152                 else
13153                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13154
13155                 switch (led_cfg) {
13156                 default:
13157                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13158                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13159                         break;
13160
13161                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13162                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13163                         break;
13164
13165                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13166                         tp->led_ctrl = LED_CTRL_MODE_MAC;
13167
13168                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
13169                          * read on some older 5700/5701 bootcode.
13170                          */
13171                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13172                             ASIC_REV_5700 ||
13173                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
13174                             ASIC_REV_5701)
13175                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13176
13177                         break;
13178
13179                 case SHASTA_EXT_LED_SHARED:
13180                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
13181                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13182                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13183                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13184                                                  LED_CTRL_MODE_PHY_2);
13185                         break;
13186
13187                 case SHASTA_EXT_LED_MAC:
13188                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13189                         break;
13190
13191                 case SHASTA_EXT_LED_COMBO:
13192                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
13193                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13194                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13195                                                  LED_CTRL_MODE_PHY_2);
13196                         break;
13197
13198                 }
13199
13200                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13201                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13202                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13203                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13204
13205                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13206                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13207
13208                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13209                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13210                         if ((tp->pdev->subsystem_vendor ==
13211                              PCI_VENDOR_ID_ARIMA) &&
13212                             (tp->pdev->subsystem_device == 0x205a ||
13213                              tp->pdev->subsystem_device == 0x2063))
13214                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13215                 } else {
13216                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13217                         tg3_flag_set(tp, IS_NIC);
13218                 }
13219
13220                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13221                         tg3_flag_set(tp, ENABLE_ASF);
13222                         if (tg3_flag(tp, 5750_PLUS))
13223                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13224                 }
13225
13226                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13227                     tg3_flag(tp, 5750_PLUS))
13228                         tg3_flag_set(tp, ENABLE_APE);
13229
13230                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13231                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13232                         tg3_flag_clear(tp, WOL_CAP);
13233
13234                 if (tg3_flag(tp, WOL_CAP) &&
13235                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13236                         tg3_flag_set(tp, WOL_ENABLE);
13237                         device_set_wakeup_enable(&tp->pdev->dev, true);
13238                 }
13239
13240                 if (cfg2 & (1 << 17))
13241                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13242
13243                 /* serdes signal pre-emphasis in register 0x590 set by */
13244                 /* bootcode if bit 18 is set */
13245                 if (cfg2 & (1 << 18))
13246                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13247
13248                 if ((tg3_flag(tp, 57765_PLUS) ||
13249                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13250                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13251                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13252                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13253
13254                 if (tg3_flag(tp, PCI_EXPRESS) &&
13255                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13256                     !tg3_flag(tp, 57765_PLUS)) {
13257                         u32 cfg3;
13258
13259                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13260                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13261                                 tg3_flag_set(tp, ASPM_WORKAROUND);
13262                 }
13263
13264                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13265                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13266                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13267                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13268                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13269                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13270         }
13271 done:
13272         if (tg3_flag(tp, WOL_CAP))
13273                 device_set_wakeup_enable(&tp->pdev->dev,
13274                                          tg3_flag(tp, WOL_ENABLE));
13275         else
13276                 device_set_wakeup_capable(&tp->pdev->dev, false);
13277 }
13278
13279 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13280 {
13281         int i;
13282         u32 val;
13283
13284         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13285         tw32(OTP_CTRL, cmd);
13286
13287         /* Wait for up to 1 ms for command to execute. */
13288         for (i = 0; i < 100; i++) {
13289                 val = tr32(OTP_STATUS);
13290                 if (val & OTP_STATUS_CMD_DONE)
13291                         break;
13292                 udelay(10);
13293         }
13294
13295         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13296 }
13297
13298 /* Read the gphy configuration from the OTP region of the chip.  The gphy
13299  * configuration is a 32-bit value that straddles the alignment boundary.
13300  * We do two 32-bit reads and then shift and merge the results.
13301  */
13302 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13303 {
13304         u32 bhalf_otp, thalf_otp;
13305
13306         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13307
13308         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13309                 return 0;
13310
13311         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13312
13313         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13314                 return 0;
13315
13316         thalf_otp = tr32(OTP_READ_DATA);
13317
13318         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13319
13320         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13321                 return 0;
13322
13323         bhalf_otp = tr32(OTP_READ_DATA);
13324
13325         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13326 }
13327
13328 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13329 {
13330         u32 adv = ADVERTISED_Autoneg;
13331
13332         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13333                 adv |= ADVERTISED_1000baseT_Half |
13334                        ADVERTISED_1000baseT_Full;
13335
13336         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13337                 adv |= ADVERTISED_100baseT_Half |
13338                        ADVERTISED_100baseT_Full |
13339                        ADVERTISED_10baseT_Half |
13340                        ADVERTISED_10baseT_Full |
13341                        ADVERTISED_TP;
13342         else
13343                 adv |= ADVERTISED_FIBRE;
13344
13345         tp->link_config.advertising = adv;
13346         tp->link_config.speed = SPEED_INVALID;
13347         tp->link_config.duplex = DUPLEX_INVALID;
13348         tp->link_config.autoneg = AUTONEG_ENABLE;
13349         tp->link_config.active_speed = SPEED_INVALID;
13350         tp->link_config.active_duplex = DUPLEX_INVALID;
13351         tp->link_config.orig_speed = SPEED_INVALID;
13352         tp->link_config.orig_duplex = DUPLEX_INVALID;
13353         tp->link_config.orig_autoneg = AUTONEG_INVALID;
13354 }
13355
/* Probe the PHY: establish tp->phy_id and the serdes/EEE phy_flags,
 * then (for copper, non-ASF/APE setups) reset the PHY and restart
 * autonegotiation if the advertised modes are stale.
 *
 * Returns 0 on success or a negative errno if no usable PHY can be
 * identified or the PHY reset fails.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	/* When phylib manages the PHY, delegate the whole probe to it. */
	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Pack the two MII ID words into the driver's internal
		 * PHY-id layout.
		 */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	/* Mark EEE-capable chips; serdes parts and early chip revs are
	 * excluded.
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
	     (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		/* BMSR is read twice; presumably to clear latched status
		 * so the second read reflects the current link state --
		 * confirm against the PHY datasheet before changing.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		/* If the advertised modes don't match the desired link
		 * config, reprogram them and restart autonegotiation.
		 */
		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		/* NOTE(review): the 5401 DSP init is intentionally issued
		 * a second time here in the original code -- verify before
		 * simplifying.
		 */
		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
13466
13467 static void __devinit tg3_read_vpd(struct tg3 *tp)
13468 {
13469         u8 *vpd_data;
13470         unsigned int block_end, rosize, len;
13471         u32 vpdlen;
13472         int j, i = 0;
13473
13474         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13475         if (!vpd_data)
13476                 goto out_no_vpd;
13477
13478         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13479         if (i < 0)
13480                 goto out_not_found;
13481
13482         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13483         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13484         i += PCI_VPD_LRDT_TAG_SIZE;
13485
13486         if (block_end > vpdlen)
13487                 goto out_not_found;
13488
13489         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13490                                       PCI_VPD_RO_KEYWORD_MFR_ID);
13491         if (j > 0) {
13492                 len = pci_vpd_info_field_size(&vpd_data[j]);
13493
13494                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13495                 if (j + len > block_end || len != 4 ||
13496                     memcmp(&vpd_data[j], "1028", 4))
13497                         goto partno;
13498
13499                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13500                                               PCI_VPD_RO_KEYWORD_VENDOR0);
13501                 if (j < 0)
13502                         goto partno;
13503
13504                 len = pci_vpd_info_field_size(&vpd_data[j]);
13505
13506                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13507                 if (j + len > block_end)
13508                         goto partno;
13509
13510                 memcpy(tp->fw_ver, &vpd_data[j], len);
13511                 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
13512         }
13513
13514 partno:
13515         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13516                                       PCI_VPD_RO_KEYWORD_PARTNO);
13517         if (i < 0)
13518                 goto out_not_found;
13519
13520         len = pci_vpd_info_field_size(&vpd_data[i]);
13521
13522         i += PCI_VPD_INFO_FLD_HDR_SIZE;
13523         if (len > TG3_BPN_SIZE ||
13524             (len + i) > vpdlen)
13525                 goto out_not_found;
13526
13527         memcpy(tp->board_part_number, &vpd_data[i], len);
13528
13529 out_not_found:
13530         kfree(vpd_data);
13531         if (tp->board_part_number[0])
13532                 return;
13533
13534 out_no_vpd:
13535         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13536                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13537                         strcpy(tp->board_part_number, "BCM5717");
13538                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13539                         strcpy(tp->board_part_number, "BCM5718");
13540                 else
13541                         goto nomatch;
13542         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13543                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13544                         strcpy(tp->board_part_number, "BCM57780");
13545                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13546                         strcpy(tp->board_part_number, "BCM57760");
13547                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13548                         strcpy(tp->board_part_number, "BCM57790");
13549                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13550                         strcpy(tp->board_part_number, "BCM57788");
13551                 else
13552                         goto nomatch;
13553         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13554                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13555                         strcpy(tp->board_part_number, "BCM57761");
13556                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13557                         strcpy(tp->board_part_number, "BCM57765");
13558                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13559                         strcpy(tp->board_part_number, "BCM57781");
13560                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13561                         strcpy(tp->board_part_number, "BCM57785");
13562                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13563                         strcpy(tp->board_part_number, "BCM57791");
13564                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13565                         strcpy(tp->board_part_number, "BCM57795");
13566                 else
13567                         goto nomatch;
13568         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
13569                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
13570                         strcpy(tp->board_part_number, "BCM57762");
13571                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
13572                         strcpy(tp->board_part_number, "BCM57766");
13573                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
13574                         strcpy(tp->board_part_number, "BCM57782");
13575                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13576                         strcpy(tp->board_part_number, "BCM57786");
13577                 else
13578                         goto nomatch;
13579         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13580                 strcpy(tp->board_part_number, "BCM95906");
13581         } else {
13582 nomatch:
13583                 strcpy(tp->board_part_number, "none");
13584         }
13585 }
13586
13587 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13588 {
13589         u32 val;
13590
13591         if (tg3_nvram_read(tp, offset, &val) ||
13592             (val & 0xfc000000) != 0x0c000000 ||
13593             tg3_nvram_read(tp, offset + 4, &val) ||
13594             val != 0)
13595                 return 0;
13596
13597         return 1;
13598 }
13599
/* Extract the bootcode version from a TG3_EEPROM_MAGIC-style NVRAM
 * image and append it to tp->fw_ver.  Newer images carry a 16-byte
 * ASCII version string; older ones only a packed major/minor word.
 */
static void __devinit tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	/* Header words: image offset at 0xc, load address at 0x4. */
	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	/* A 0x0c000000 signature followed by a zero word marks the
	 * newer, string-bearing image format.
	 */
	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	/* Append after whatever is already in fw_ver. */
	dst_off = strlen(tp->fw_ver);

	if (newver) {
		/* Need room for the 16-byte version string. */
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		/* Translate the load-address-relative pointer into an
		 * NVRAM offset.
		 */
		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		/* Old format: packed major/minor in the pointer-table
		 * bootcode-version word.
		 */
		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
13651
13652 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13653 {
13654         u32 val, major, minor;
13655
13656         /* Use native endian representation */
13657         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13658                 return;
13659
13660         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13661                 TG3_NVM_HWSB_CFG1_MAJSFT;
13662         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13663                 TG3_NVM_HWSB_CFG1_MINSFT;
13664
13665         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13666 }
13667
/* Parse a self-boot format-1 image's version fields (@val is the magic
 * word already read from NVRAM offset 0) and append "sb vM.mm[x]" to
 * tp->fw_ver, where the optional trailing letter encodes the build.
 */
static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	/* Each image revision stores its version word at a different
	 * offset; unknown revisions leave just "sb" in fw_ver.
	 */
	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;

	/* Sanity-limit the decoded fields before printing. */
	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	/* Builds 1..26 are rendered as a trailing letter 'a'..'z'. */
	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
13722
/* Locate the ASF management-firmware image via the NVRAM directory and
 * append its version string (up to 16 bytes, prefixed with ", ") to
 * tp->fw_ver, truncating at the end of the buffer.
 */
static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	/* Scan the NVRAM directory for the ASF-init entry. */
	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	/* Pre-5705 chips use a fixed load address; later chips store it
	 * in the word preceding the directory entry.
	 */
	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	/* Convert the load-address-relative version pointer into an
	 * NVRAM offset.
	 */
	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	/* Copy up to four 4-byte words of version text; the final word
	 * is clipped if it would run past the end of fw_ver.
	 */
	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
13774
13775 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13776 {
13777         int vlen;
13778         u32 apedata;
13779         char *fwtype;
13780
13781         if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13782                 return;
13783
13784         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13785         if (apedata != APE_SEG_SIG_MAGIC)
13786                 return;
13787
13788         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13789         if (!(apedata & APE_FW_STATUS_READY))
13790                 return;
13791
13792         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13793
13794         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13795                 tg3_flag_set(tp, APE_HAS_NCSI);
13796                 fwtype = "NCSI";
13797         } else {
13798                 fwtype = "DASH";
13799         }
13800
13801         vlen = strlen(tp->fw_ver);
13802
13803         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13804                  fwtype,
13805                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13806                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13807                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13808                  (apedata & APE_FW_VERSION_BLDMSK));
13809 }
13810
13811 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13812 {
13813         u32 val;
13814         bool vpd_vers = false;
13815
13816         if (tp->fw_ver[0] != 0)
13817                 vpd_vers = true;
13818
13819         if (tg3_flag(tp, NO_NVRAM)) {
13820                 strcat(tp->fw_ver, "sb");
13821                 return;
13822         }
13823
13824         if (tg3_nvram_read(tp, 0, &val))
13825                 return;
13826
13827         if (val == TG3_EEPROM_MAGIC)
13828                 tg3_read_bc_ver(tp);
13829         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13830                 tg3_read_sb_ver(tp, val);
13831         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13832                 tg3_read_hwsb_ver(tp);
13833         else
13834                 return;
13835
13836         if (vpd_vers)
13837                 goto done;
13838
13839         if (tg3_flag(tp, ENABLE_APE)) {
13840                 if (tg3_flag(tp, ENABLE_ASF))
13841                         tg3_read_dash_ver(tp);
13842         } else if (tg3_flag(tp, ENABLE_ASF)) {
13843                 tg3_read_mgmtfw_ver(tp);
13844         }
13845
13846 done:
13847         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13848 }
13849
13850 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13851
13852 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13853 {
13854         if (tg3_flag(tp, LRG_PROD_RING_CAP))
13855                 return TG3_RX_RET_MAX_SIZE_5717;
13856         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13857                 return TG3_RX_RET_MAX_SIZE_5700;
13858         else
13859                 return TG3_RX_RET_MAX_SIZE_5705;
13860 }
13861
/* Host bridges/chipsets known to reorder posted memory writes; the
 * driver enables a write-reorder workaround when one is detected.
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
13868
13869 static int __devinit tg3_get_invariants(struct tg3 *tp)
13870 {
13871         u32 misc_ctrl_reg;
13872         u32 pci_state_reg, grc_misc_cfg;
13873         u32 val;
13874         u16 pci_cmd;
13875         int err;
13876
13877         /* Force memory write invalidate off.  If we leave it on,
13878          * then on 5700_BX chips we have to enable a workaround.
13879          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13880          * to match the cacheline size.  The Broadcom driver have this
13881          * workaround but turns MWI off all the times so never uses
13882          * it.  This seems to suggest that the workaround is insufficient.
13883          */
13884         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13885         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13886         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13887
13888         /* Important! -- Make sure register accesses are byteswapped
13889          * correctly.  Also, for those chips that require it, make
13890          * sure that indirect register accesses are enabled before
13891          * the first operation.
13892          */
13893         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13894                               &misc_ctrl_reg);
13895         tp->misc_host_ctrl |= (misc_ctrl_reg &
13896                                MISC_HOST_CTRL_CHIPREV);
13897         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13898                                tp->misc_host_ctrl);
13899
13900         tp->pci_chip_rev_id = (misc_ctrl_reg >>
13901                                MISC_HOST_CTRL_CHIPREV_SHIFT);
13902         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13903                 u32 prod_id_asic_rev;
13904
13905                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13906                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13907                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13908                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13909                         pci_read_config_dword(tp->pdev,
13910                                               TG3PCI_GEN2_PRODID_ASICREV,
13911                                               &prod_id_asic_rev);
13912                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13913                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13914                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13915                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13916                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13917                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
13918                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
13919                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
13920                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
13921                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13922                         pci_read_config_dword(tp->pdev,
13923                                               TG3PCI_GEN15_PRODID_ASICREV,
13924                                               &prod_id_asic_rev);
13925                 else
13926                         pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13927                                               &prod_id_asic_rev);
13928
13929                 tp->pci_chip_rev_id = prod_id_asic_rev;
13930         }
13931
13932         /* Wrong chip ID in 5752 A0. This code can be removed later
13933          * as A0 is not in production.
13934          */
13935         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13936                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13937
13938         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13939          * we need to disable memory and use config. cycles
13940          * only to access all registers. The 5702/03 chips
13941          * can mistakenly decode the special cycles from the
13942          * ICH chipsets as memory write cycles, causing corruption
13943          * of register and memory space. Only certain ICH bridges
13944          * will drive special cycles with non-zero data during the
13945          * address phase which can fall within the 5703's address
13946          * range. This is not an ICH bug as the PCI spec allows
13947          * non-zero address during special cycles. However, only
13948          * these ICH bridges are known to drive non-zero addresses
13949          * during special cycles.
13950          *
13951          * Since special cycles do not cross PCI bridges, we only
13952          * enable this workaround if the 5703 is on the secondary
13953          * bus of these ICH bridges.
13954          */
13955         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13956             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13957                 static struct tg3_dev_id {
13958                         u32     vendor;
13959                         u32     device;
13960                         u32     rev;
13961                 } ich_chipsets[] = {
13962                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13963                           PCI_ANY_ID },
13964                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13965                           PCI_ANY_ID },
13966                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13967                           0xa },
13968                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13969                           PCI_ANY_ID },
13970                         { },
13971                 };
13972                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13973                 struct pci_dev *bridge = NULL;
13974
13975                 while (pci_id->vendor != 0) {
13976                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
13977                                                 bridge);
13978                         if (!bridge) {
13979                                 pci_id++;
13980                                 continue;
13981                         }
13982                         if (pci_id->rev != PCI_ANY_ID) {
13983                                 if (bridge->revision > pci_id->rev)
13984                                         continue;
13985                         }
13986                         if (bridge->subordinate &&
13987                             (bridge->subordinate->number ==
13988                              tp->pdev->bus->number)) {
13989                                 tg3_flag_set(tp, ICH_WORKAROUND);
13990                                 pci_dev_put(bridge);
13991                                 break;
13992                         }
13993                 }
13994         }
13995
13996         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13997                 static struct tg3_dev_id {
13998                         u32     vendor;
13999                         u32     device;
14000                 } bridge_chipsets[] = {
14001                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
14002                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
14003                         { },
14004                 };
14005                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14006                 struct pci_dev *bridge = NULL;
14007
14008                 while (pci_id->vendor != 0) {
14009                         bridge = pci_get_device(pci_id->vendor,
14010                                                 pci_id->device,
14011                                                 bridge);
14012                         if (!bridge) {
14013                                 pci_id++;
14014                                 continue;
14015                         }
14016                         if (bridge->subordinate &&
14017                             (bridge->subordinate->number <=
14018                              tp->pdev->bus->number) &&
14019                             (bridge->subordinate->subordinate >=
14020                              tp->pdev->bus->number)) {
14021                                 tg3_flag_set(tp, 5701_DMA_BUG);
14022                                 pci_dev_put(bridge);
14023                                 break;
14024                         }
14025                 }
14026         }
14027
14028         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
14029          * DMA addresses > 40-bit. This bridge may have other additional
14030          * 57xx devices behind it in some 4-port NIC designs for example.
14031          * Any tg3 device found behind the bridge will also need the 40-bit
14032          * DMA workaround.
14033          */
14034         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14035             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14036                 tg3_flag_set(tp, 5780_CLASS);
14037                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14038                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
14039         } else {
14040                 struct pci_dev *bridge = NULL;
14041
14042                 do {
14043                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14044                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
14045                                                 bridge);
14046                         if (bridge && bridge->subordinate &&
14047                             (bridge->subordinate->number <=
14048                              tp->pdev->bus->number) &&
14049                             (bridge->subordinate->subordinate >=
14050                              tp->pdev->bus->number)) {
14051                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14052                                 pci_dev_put(bridge);
14053                                 break;
14054                         }
14055                 } while (bridge);
14056         }
14057
14058         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14059             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14060                 tp->pdev_peer = tg3_find_peer(tp);
14061
14062         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14063             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14064             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14065                 tg3_flag_set(tp, 5717_PLUS);
14066
14067         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
14068             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14069                 tg3_flag_set(tp, 57765_CLASS);
14070
14071         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
14072                 tg3_flag_set(tp, 57765_PLUS);
14073
14074         /* Intentionally exclude ASIC_REV_5906 */
14075         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14076             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14077             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14078             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14079             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14080             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14081             tg3_flag(tp, 57765_PLUS))
14082                 tg3_flag_set(tp, 5755_PLUS);
14083
14084         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14085             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14086             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14087             tg3_flag(tp, 5755_PLUS) ||
14088             tg3_flag(tp, 5780_CLASS))
14089                 tg3_flag_set(tp, 5750_PLUS);
14090
14091         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14092             tg3_flag(tp, 5750_PLUS))
14093                 tg3_flag_set(tp, 5705_PLUS);
14094
14095         /* Determine TSO capabilities */
14096         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
14097                 ; /* Do nothing. HW bug. */
14098         else if (tg3_flag(tp, 57765_PLUS))
14099                 tg3_flag_set(tp, HW_TSO_3);
14100         else if (tg3_flag(tp, 5755_PLUS) ||
14101                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14102                 tg3_flag_set(tp, HW_TSO_2);
14103         else if (tg3_flag(tp, 5750_PLUS)) {
14104                 tg3_flag_set(tp, HW_TSO_1);
14105                 tg3_flag_set(tp, TSO_BUG);
14106                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14107                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
14108                         tg3_flag_clear(tp, TSO_BUG);
14109         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14110                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14111                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
14112                         tg3_flag_set(tp, TSO_BUG);
14113                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14114                         tp->fw_needed = FIRMWARE_TG3TSO5;
14115                 else
14116                         tp->fw_needed = FIRMWARE_TG3TSO;
14117         }
14118
14119         /* Selectively allow TSO based on operating conditions */
14120         if (tg3_flag(tp, HW_TSO_1) ||
14121             tg3_flag(tp, HW_TSO_2) ||
14122             tg3_flag(tp, HW_TSO_3) ||
14123             tp->fw_needed) {
14124                 /* For firmware TSO, assume ASF is disabled.
14125                  * We'll disable TSO later if we discover ASF
14126                  * is enabled in tg3_get_eeprom_hw_cfg().
14127                  */
14128                 tg3_flag_set(tp, TSO_CAPABLE);
14129         } else {
14130                 tg3_flag_clear(tp, TSO_CAPABLE);
14131                 tg3_flag_clear(tp, TSO_BUG);
14132                 tp->fw_needed = NULL;
14133         }
14134
14135         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14136                 tp->fw_needed = FIRMWARE_TG3;
14137
14138         tp->irq_max = 1;
14139
14140         if (tg3_flag(tp, 5750_PLUS)) {
14141                 tg3_flag_set(tp, SUPPORT_MSI);
14142                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14143                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14144                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14145                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14146                      tp->pdev_peer == tp->pdev))
14147                         tg3_flag_clear(tp, SUPPORT_MSI);
14148
14149                 if (tg3_flag(tp, 5755_PLUS) ||
14150                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14151                         tg3_flag_set(tp, 1SHOT_MSI);
14152                 }
14153
14154                 if (tg3_flag(tp, 57765_PLUS)) {
14155                         tg3_flag_set(tp, SUPPORT_MSIX);
14156                         tp->irq_max = TG3_IRQ_MAX_VECS;
14157                         tg3_rss_init_dflt_indir_tbl(tp);
14158                 }
14159         }
14160
14161         if (tg3_flag(tp, 5755_PLUS))
14162                 tg3_flag_set(tp, SHORT_DMA_BUG);
14163
14164         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14165                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
14166         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14167                 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
14168
14169         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14170             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14171             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14172                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14173
14174         if (tg3_flag(tp, 57765_PLUS) &&
14175             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14176                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14177
14178         if (!tg3_flag(tp, 5705_PLUS) ||
14179             tg3_flag(tp, 5780_CLASS) ||
14180             tg3_flag(tp, USE_JUMBO_BDFLAG))
14181                 tg3_flag_set(tp, JUMBO_CAPABLE);
14182
14183         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14184                               &pci_state_reg);
14185
14186         if (pci_is_pcie(tp->pdev)) {
14187                 u16 lnkctl;
14188
14189                 tg3_flag_set(tp, PCI_EXPRESS);
14190
14191                 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
14192                         int readrq = pcie_get_readrq(tp->pdev);
14193                         if (readrq > 2048)
14194                                 pcie_set_readrq(tp->pdev, 2048);
14195                 }
14196
14197                 pci_read_config_word(tp->pdev,
14198                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14199                                      &lnkctl);
14200                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14201                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14202                             ASIC_REV_5906) {
14203                                 tg3_flag_clear(tp, HW_TSO_2);
14204                                 tg3_flag_clear(tp, TSO_CAPABLE);
14205                         }
14206                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14207                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14208                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14209                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14210                                 tg3_flag_set(tp, CLKREQ_BUG);
14211                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14212                         tg3_flag_set(tp, L1PLLPD_EN);
14213                 }
14214         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14215                 /* BCM5785 devices are effectively PCIe devices, and should
14216                  * follow PCIe codepaths, but do not have a PCIe capabilities
14217                  * section.
14218                  */
14219                 tg3_flag_set(tp, PCI_EXPRESS);
14220         } else if (!tg3_flag(tp, 5705_PLUS) ||
14221                    tg3_flag(tp, 5780_CLASS)) {
14222                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14223                 if (!tp->pcix_cap) {
14224                         dev_err(&tp->pdev->dev,
14225                                 "Cannot find PCI-X capability, aborting\n");
14226                         return -EIO;
14227                 }
14228
14229                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14230                         tg3_flag_set(tp, PCIX_MODE);
14231         }
14232
14233         /* If we have an AMD 762 or VIA K8T800 chipset, write
14234          * reordering to the mailbox registers done by the host
14235          * controller can cause major troubles.  We read back from
14236          * every mailbox register write to force the writes to be
14237          * posted to the chip in order.
14238          */
14239         if (pci_dev_present(tg3_write_reorder_chipsets) &&
14240             !tg3_flag(tp, PCI_EXPRESS))
14241                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14242
14243         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14244                              &tp->pci_cacheline_sz);
14245         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14246                              &tp->pci_lat_timer);
14247         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14248             tp->pci_lat_timer < 64) {
14249                 tp->pci_lat_timer = 64;
14250                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14251                                       tp->pci_lat_timer);
14252         }
14253
14254         /* Important! -- It is critical that the PCI-X hw workaround
14255          * situation is decided before the first MMIO register access.
14256          */
14257         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14258                 /* 5700 BX chips need to have their TX producer index
14259                  * mailboxes written twice to workaround a bug.
14260                  */
14261                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14262
14263                 /* If we are in PCI-X mode, enable register write workaround.
14264                  *
14265                  * The workaround is to use indirect register accesses
14266                  * for all chip writes not to mailbox registers.
14267                  */
14268                 if (tg3_flag(tp, PCIX_MODE)) {
14269                         u32 pm_reg;
14270
14271                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14272
14273                         /* The chip can have its power management PCI config
14274                          * space registers clobbered due to this bug.
14275                          * So explicitly force the chip into D0 here.
14276                          */
14277                         pci_read_config_dword(tp->pdev,
14278                                               tp->pm_cap + PCI_PM_CTRL,
14279                                               &pm_reg);
14280                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14281                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14282                         pci_write_config_dword(tp->pdev,
14283                                                tp->pm_cap + PCI_PM_CTRL,
14284                                                pm_reg);
14285
14286                         /* Also, force SERR#/PERR# in PCI command. */
14287                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14288                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14289                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14290                 }
14291         }
14292
14293         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14294                 tg3_flag_set(tp, PCI_HIGH_SPEED);
14295         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14296                 tg3_flag_set(tp, PCI_32BIT);
14297
14298         /* Chip-specific fixup from Broadcom driver */
14299         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14300             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14301                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14302                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14303         }
14304
14305         /* Default fast path register access methods */
14306         tp->read32 = tg3_read32;
14307         tp->write32 = tg3_write32;
14308         tp->read32_mbox = tg3_read32;
14309         tp->write32_mbox = tg3_write32;
14310         tp->write32_tx_mbox = tg3_write32;
14311         tp->write32_rx_mbox = tg3_write32;
14312
14313         /* Various workaround register access methods */
14314         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14315                 tp->write32 = tg3_write_indirect_reg32;
14316         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14317                  (tg3_flag(tp, PCI_EXPRESS) &&
14318                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14319                 /*
14320                  * Back to back register writes can cause problems on these
14321                  * chips, the workaround is to read back all reg writes
14322                  * except those to mailbox regs.
14323                  *
14324                  * See tg3_write_indirect_reg32().
14325                  */
14326                 tp->write32 = tg3_write_flush_reg32;
14327         }
14328
14329         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14330                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14331                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14332                         tp->write32_rx_mbox = tg3_write_flush_reg32;
14333         }
14334
14335         if (tg3_flag(tp, ICH_WORKAROUND)) {
14336                 tp->read32 = tg3_read_indirect_reg32;
14337                 tp->write32 = tg3_write_indirect_reg32;
14338                 tp->read32_mbox = tg3_read_indirect_mbox;
14339                 tp->write32_mbox = tg3_write_indirect_mbox;
14340                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14341                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14342
14343                 iounmap(tp->regs);
14344                 tp->regs = NULL;
14345
14346                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14347                 pci_cmd &= ~PCI_COMMAND_MEMORY;
14348                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14349         }
14350         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14351                 tp->read32_mbox = tg3_read32_mbox_5906;
14352                 tp->write32_mbox = tg3_write32_mbox_5906;
14353                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14354                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14355         }
14356
14357         if (tp->write32 == tg3_write_indirect_reg32 ||
14358             (tg3_flag(tp, PCIX_MODE) &&
14359              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14360               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14361                 tg3_flag_set(tp, SRAM_USE_CONFIG);
14362
14363         /* The memory arbiter has to be enabled in order for SRAM accesses
14364          * to succeed.  Normally on powerup the tg3 chip firmware will make
14365          * sure it is enabled, but other entities such as system netboot
14366          * code might disable it.
14367          */
14368         val = tr32(MEMARB_MODE);
14369         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14370
14371         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14372         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14373             tg3_flag(tp, 5780_CLASS)) {
14374                 if (tg3_flag(tp, PCIX_MODE)) {
14375                         pci_read_config_dword(tp->pdev,
14376                                               tp->pcix_cap + PCI_X_STATUS,
14377                                               &val);
14378                         tp->pci_fn = val & 0x7;
14379                 }
14380         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14381                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14382                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14383                     NIC_SRAM_CPMUSTAT_SIG) {
14384                         tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14385                         tp->pci_fn = tp->pci_fn ? 1 : 0;
14386                 }
14387         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14388                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14389                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14390                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14391                     NIC_SRAM_CPMUSTAT_SIG) {
14392                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14393                                      TG3_CPMU_STATUS_FSHFT_5719;
14394                 }
14395         }
14396
14397         /* Get eeprom hw config before calling tg3_set_power_state().
14398          * In particular, the TG3_FLAG_IS_NIC flag must be
14399          * determined before calling tg3_set_power_state() so that
14400          * we know whether or not to switch out of Vaux power.
14401          * When the flag is set, it means that GPIO1 is used for eeprom
14402          * write protect and also implies that it is a LOM where GPIOs
14403          * are not used to switch power.
14404          */
14405         tg3_get_eeprom_hw_cfg(tp);
14406
14407         if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14408                 tg3_flag_clear(tp, TSO_CAPABLE);
14409                 tg3_flag_clear(tp, TSO_BUG);
14410                 tp->fw_needed = NULL;
14411         }
14412
14413         if (tg3_flag(tp, ENABLE_APE)) {
14414                 /* Allow reads and writes to the
14415                  * APE register and memory space.
14416                  */
14417                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14418                                  PCISTATE_ALLOW_APE_SHMEM_WR |
14419                                  PCISTATE_ALLOW_APE_PSPACE_WR;
14420                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14421                                        pci_state_reg);
14422
14423                 tg3_ape_lock_init(tp);
14424         }
14425
14426         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14427             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14428             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14429             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14430             tg3_flag(tp, 57765_PLUS))
14431                 tg3_flag_set(tp, CPMU_PRESENT);
14432
14433         /* Set up tp->grc_local_ctrl before calling
14434          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14435          * will bring 5700's external PHY out of reset.
14436          * It is also used as eeprom write protect on LOMs.
14437          */
14438         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14439         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14440             tg3_flag(tp, EEPROM_WRITE_PROT))
14441                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14442                                        GRC_LCLCTRL_GPIO_OUTPUT1);
14443         /* Unused GPIO3 must be driven as output on 5752 because there
14444          * are no pull-up resistors on unused GPIO pins.
14445          */
14446         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14447                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14448
14449         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14450             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14451             tg3_flag(tp, 57765_CLASS))
14452                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14453
14454         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14455             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14456                 /* Turn off the debug UART. */
14457                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14458                 if (tg3_flag(tp, IS_NIC))
14459                         /* Keep VMain power. */
14460                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14461                                               GRC_LCLCTRL_GPIO_OUTPUT0;
14462         }
14463
14464         /* Switch out of Vaux if it is a NIC */
14465         tg3_pwrsrc_switch_to_vmain(tp);
14466
14467         /* Derive initial jumbo mode from MTU assigned in
14468          * ether_setup() via the alloc_etherdev() call
14469          */
14470         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14471                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14472
14473         /* Determine WakeOnLan speed to use. */
14474         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14475             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14476             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14477             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14478                 tg3_flag_clear(tp, WOL_SPEED_100MB);
14479         } else {
14480                 tg3_flag_set(tp, WOL_SPEED_100MB);
14481         }
14482
14483         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14484                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14485
14486         /* A few boards don't want Ethernet@WireSpeed phy feature */
14487         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14488             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14489              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14490              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14491             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14492             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14493                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14494
14495         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14496             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14497                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14498         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14499                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14500
14501         if (tg3_flag(tp, 5705_PLUS) &&
14502             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14503             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14504             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14505             !tg3_flag(tp, 57765_PLUS)) {
14506                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14507                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14508                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14509                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14510                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14511                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14512                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14513                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14514                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14515                 } else
14516                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14517         }
14518
14519         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14520             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14521                 tp->phy_otp = tg3_read_otp_phycfg(tp);
14522                 if (tp->phy_otp == 0)
14523                         tp->phy_otp = TG3_OTP_DEFAULT;
14524         }
14525
14526         if (tg3_flag(tp, CPMU_PRESENT))
14527                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14528         else
14529                 tp->mi_mode = MAC_MI_MODE_BASE;
14530
14531         tp->coalesce_mode = 0;
14532         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14533             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14534                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14535
14536         /* Set these bits to enable statistics workaround. */
14537         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14538             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14539             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14540                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14541                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14542         }
14543
14544         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14545             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14546                 tg3_flag_set(tp, USE_PHYLIB);
14547
14548         err = tg3_mdio_init(tp);
14549         if (err)
14550                 return err;
14551
14552         /* Initialize data/descriptor byte/word swapping. */
14553         val = tr32(GRC_MODE);
14554         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14555                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14556                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
14557                         GRC_MODE_B2HRX_ENABLE |
14558                         GRC_MODE_HTX2B_ENABLE |
14559                         GRC_MODE_HOST_STACKUP);
14560         else
14561                 val &= GRC_MODE_HOST_STACKUP;
14562
14563         tw32(GRC_MODE, val | tp->grc_mode);
14564
14565         tg3_switch_clocks(tp);
14566
14567         /* Clear this out for sanity. */
14568         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14569
14570         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14571                               &pci_state_reg);
14572         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14573             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14574                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14575
14576                 if (chiprevid == CHIPREV_ID_5701_A0 ||
14577                     chiprevid == CHIPREV_ID_5701_B0 ||
14578                     chiprevid == CHIPREV_ID_5701_B2 ||
14579                     chiprevid == CHIPREV_ID_5701_B5) {
14580                         void __iomem *sram_base;
14581
14582                         /* Write some dummy words into the SRAM status block
14583                          * area, see if it reads back correctly.  If the return
14584                          * value is bad, force enable the PCIX workaround.
14585                          */
14586                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14587
14588                         writel(0x00000000, sram_base);
14589                         writel(0x00000000, sram_base + 4);
14590                         writel(0xffffffff, sram_base + 4);
14591                         if (readl(sram_base) != 0x00000000)
14592                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14593                 }
14594         }
14595
14596         udelay(50);
14597         tg3_nvram_init(tp);
14598
14599         grc_misc_cfg = tr32(GRC_MISC_CFG);
14600         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14601
14602         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14603             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14604              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14605                 tg3_flag_set(tp, IS_5788);
14606
14607         if (!tg3_flag(tp, IS_5788) &&
14608             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14609                 tg3_flag_set(tp, TAGGED_STATUS);
14610         if (tg3_flag(tp, TAGGED_STATUS)) {
14611                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14612                                       HOSTCC_MODE_CLRTICK_TXBD);
14613
14614                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14615                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14616                                        tp->misc_host_ctrl);
14617         }
14618
14619         /* Preserve the APE MAC_MODE bits */
14620         if (tg3_flag(tp, ENABLE_APE))
14621                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14622         else
14623                 tp->mac_mode = 0;
14624
14625         /* these are limited to 10/100 only */
14626         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14627              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14628             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14629              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14630              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14631               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14632               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14633             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14634              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14635               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14636               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14637             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14638             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14639             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14640             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14641                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14642
14643         err = tg3_phy_probe(tp);
14644         if (err) {
14645                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14646                 /* ... but do not return immediately ... */
14647                 tg3_mdio_fini(tp);
14648         }
14649
14650         tg3_read_vpd(tp);
14651         tg3_read_fw_ver(tp);
14652
14653         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14654                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14655         } else {
14656                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14657                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14658                 else
14659                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14660         }
14661
14662         /* 5700 {AX,BX} chips have a broken status block link
14663          * change bit implementation, so we must use the
14664          * status register in those cases.
14665          */
14666         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14667                 tg3_flag_set(tp, USE_LINKCHG_REG);
14668         else
14669                 tg3_flag_clear(tp, USE_LINKCHG_REG);
14670
14671         /* The led_ctrl is set during tg3_phy_probe, here we might
14672          * have to force the link status polling mechanism based
14673          * upon subsystem IDs.
14674          */
14675         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14676             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14677             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14678                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14679                 tg3_flag_set(tp, USE_LINKCHG_REG);
14680         }
14681
14682         /* For all SERDES we poll the MAC status register. */
14683         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14684                 tg3_flag_set(tp, POLL_SERDES);
14685         else
14686                 tg3_flag_clear(tp, POLL_SERDES);
14687
14688         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
14689         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14690         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14691             tg3_flag(tp, PCIX_MODE)) {
14692                 tp->rx_offset = NET_SKB_PAD;
14693 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14694                 tp->rx_copy_thresh = ~(u16)0;
14695 #endif
14696         }
14697
14698         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14699         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14700         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14701
14702         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14703
14704         /* Increment the rx prod index on the rx std ring by at most
14705          * 8 for these chips to workaround hw errata.
14706          */
14707         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14708             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14709             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14710                 tp->rx_std_max_post = 8;
14711
14712         if (tg3_flag(tp, ASPM_WORKAROUND))
14713                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14714                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
14715
14716         return err;
14717 }
14718
14719 #ifdef CONFIG_SPARC
14720 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14721 {
14722         struct net_device *dev = tp->dev;
14723         struct pci_dev *pdev = tp->pdev;
14724         struct device_node *dp = pci_device_to_OF_node(pdev);
14725         const unsigned char *addr;
14726         int len;
14727
14728         addr = of_get_property(dp, "local-mac-address", &len);
14729         if (addr && len == 6) {
14730                 memcpy(dev->dev_addr, addr, 6);
14731                 memcpy(dev->perm_addr, dev->dev_addr, 6);
14732                 return 0;
14733         }
14734         return -ENODEV;
14735 }
14736
14737 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14738 {
14739         struct net_device *dev = tp->dev;
14740
14741         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14742         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14743         return 0;
14744 }
14745 #endif
14746
/* Determine the device MAC address, trying sources in decreasing order
 * of trust: OF property (SPARC), the SRAM mailbox filled in by
 * bootcode, NVRAM, the live MAC address registers, and finally the
 * SPARC IDPROM.  Returns 0 on success, -EINVAL if no valid address
 * could be found.
 */
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	/* Pick the NVRAM offset holding this function's MAC address;
	 * dual-MAC and multi-function parts keep per-port copies.
	 */
	mac_offset = 0x7c;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {	/* 0x484b == "HK" signature */
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			/* hi holds bytes 0-1 in its low 16 bits (big
			 * endian), lo holds bytes 2-5.
			 */
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
14822
14823 #define BOUNDARY_SINGLE_CACHELINE       1
14824 #define BOUNDARY_MULTI_CACHELINE        2
14825
/* Fold DMA read/write boundary bits into @val (a DMA_RW_CTRL value)
 * based on the host cache line size reported in PCI config space and
 * the platform's preferred burst behavior.  Returns the updated value;
 * @val is returned unchanged on chips where the boundary bits have no
 * effect.
 */
static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	/* PCI_CACHE_LINE_SIZE is in units of 32-bit words; 0 means
	 * "unset", treated here as a 1024-byte line.
	 */
	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

	/* Per-architecture burst policy: some RISC PCI hosts prefer
	 * bursts capped at one or a few cache lines (see comment below).
	 */
#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		/* PCIe exposes only write-side boundary controls. */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		/* Conventional PCI: cases fall through until the first
		 * boundary >= the cache line size (or, for single-line
		 * bursting, exactly the cache line size).
		 */
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
14966
/* Run one DMA transfer of @size bytes between the host buffer
 * (@buf/@buf_dma) and NIC SRAM using an internal DMA descriptor, in the
 * direction selected by @to_device (non-zero = host memory -> device).
 * Returns 0 when the completion FIFO reports the descriptor within the
 * polling window, -ENODEV on timeout.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce the completion FIFOs and both DMA engines before
	 * queueing the test descriptor.
	 */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM word by word through the
	 * PCI memory window in config space.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick off the transfer by enqueueing the descriptor address. */
	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	/* Poll (up to 40 * 100us) for the descriptor to appear on the
	 * matching completion FIFO.
	 */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
15046
15047 #define TEST_BUFFER_SIZE        0x2000
15048
/* Host bridges known to expose the 5700/5701 write DMA bug even when
 * tg3_test_dma() passes; their presence forces the 16-byte write
 * boundary workaround anyway (see tg3_test_dma()).
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};
15053
/* Choose and program the DMA read/write control (tp->dma_rwctrl) for
 * this chip/bus combination, then, on 5700/5701 only, run a host<->NIC
 * DMA loopback test to detect the write DMA bug and tighten the write
 * boundary to 16 bytes if corruption is observed.  Returns 0 on
 * success, -ENOMEM if the test buffer cannot be allocated, or -ENODEV
 * when the DMA test fails unrecoverably.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Base value: PCI write/read command codes, then let
	 * tg3_calc_dma_bndry() fold in the boundary bits.
	 */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	/* Bus-specific watermark bits (magic values per chip family). */
	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants.  */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 need the actual loopback test below. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	/* Loop: write pattern to device, read it back, verify; on a
	 * mismatch retry once with the 16-byte write boundary before
	 * giving up.
	 */
	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				/* First corruption: apply the 16-byte
				 * boundary workaround and retest.
				 */
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
15243
15244 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15245 {
15246         if (tg3_flag(tp, 57765_PLUS)) {
15247                 tp->bufmgr_config.mbuf_read_dma_low_water =
15248                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15249                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15250                         DEFAULT_MB_MACRX_LOW_WATER_57765;
15251                 tp->bufmgr_config.mbuf_high_water =
15252                         DEFAULT_MB_HIGH_WATER_57765;
15253
15254                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15255                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15256                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15257                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15258                 tp->bufmgr_config.mbuf_high_water_jumbo =
15259                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15260         } else if (tg3_flag(tp, 5705_PLUS)) {
15261                 tp->bufmgr_config.mbuf_read_dma_low_water =
15262                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15263                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15264                         DEFAULT_MB_MACRX_LOW_WATER_5705;
15265                 tp->bufmgr_config.mbuf_high_water =
15266                         DEFAULT_MB_HIGH_WATER_5705;
15267                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15268                         tp->bufmgr_config.mbuf_mac_rx_low_water =
15269                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
15270                         tp->bufmgr_config.mbuf_high_water =
15271                                 DEFAULT_MB_HIGH_WATER_5906;
15272                 }
15273
15274                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15275                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15276                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15277                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15278                 tp->bufmgr_config.mbuf_high_water_jumbo =
15279                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15280         } else {
15281                 tp->bufmgr_config.mbuf_read_dma_low_water =
15282                         DEFAULT_MB_RDMA_LOW_WATER;
15283                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15284                         DEFAULT_MB_MACRX_LOW_WATER;
15285                 tp->bufmgr_config.mbuf_high_water =
15286                         DEFAULT_MB_HIGH_WATER;
15287
15288                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15289                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15290                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15291                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15292                 tp->bufmgr_config.mbuf_high_water_jumbo =
15293                         DEFAULT_MB_HIGH_WATER_JUMBO;
15294         }
15295
15296         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15297         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15298 }
15299
15300 static char * __devinit tg3_phy_string(struct tg3 *tp)
15301 {
15302         switch (tp->phy_id & TG3_PHY_ID_MASK) {
15303         case TG3_PHY_ID_BCM5400:        return "5400";
15304         case TG3_PHY_ID_BCM5401:        return "5401";
15305         case TG3_PHY_ID_BCM5411:        return "5411";
15306         case TG3_PHY_ID_BCM5701:        return "5701";
15307         case TG3_PHY_ID_BCM5703:        return "5703";
15308         case TG3_PHY_ID_BCM5704:        return "5704";
15309         case TG3_PHY_ID_BCM5705:        return "5705";
15310         case TG3_PHY_ID_BCM5750:        return "5750";
15311         case TG3_PHY_ID_BCM5752:        return "5752";
15312         case TG3_PHY_ID_BCM5714:        return "5714";
15313         case TG3_PHY_ID_BCM5780:        return "5780";
15314         case TG3_PHY_ID_BCM5755:        return "5755";
15315         case TG3_PHY_ID_BCM5787:        return "5787";
15316         case TG3_PHY_ID_BCM5784:        return "5784";
15317         case TG3_PHY_ID_BCM5756:        return "5722/5756";
15318         case TG3_PHY_ID_BCM5906:        return "5906";
15319         case TG3_PHY_ID_BCM5761:        return "5761";
15320         case TG3_PHY_ID_BCM5718C:       return "5718C";
15321         case TG3_PHY_ID_BCM5718S:       return "5718S";
15322         case TG3_PHY_ID_BCM57765:       return "57765";
15323         case TG3_PHY_ID_BCM5719C:       return "5719C";
15324         case TG3_PHY_ID_BCM5720C:       return "5720C";
15325         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
15326         case 0:                 return "serdes";
15327         default:                return "unknown";
15328         }
15329 }
15330
15331 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15332 {
15333         if (tg3_flag(tp, PCI_EXPRESS)) {
15334                 strcpy(str, "PCI Express");
15335                 return str;
15336         } else if (tg3_flag(tp, PCIX_MODE)) {
15337                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15338
15339                 strcpy(str, "PCIX:");
15340
15341                 if ((clock_ctrl == 7) ||
15342                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15343                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15344                         strcat(str, "133MHz");
15345                 else if (clock_ctrl == 0)
15346                         strcat(str, "33MHz");
15347                 else if (clock_ctrl == 2)
15348                         strcat(str, "50MHz");
15349                 else if (clock_ctrl == 4)
15350                         strcat(str, "66MHz");
15351                 else if (clock_ctrl == 6)
15352                         strcat(str, "100MHz");
15353         } else {
15354                 strcpy(str, "PCI:");
15355                 if (tg3_flag(tp, PCI_HIGH_SPEED))
15356                         strcat(str, "66MHz");
15357                 else
15358                         strcat(str, "33MHz");
15359         }
15360         if (tg3_flag(tp, PCI_32BIT))
15361                 strcat(str, ":32-bit");
15362         else
15363                 strcat(str, ":64-bit");
15364         return str;
15365 }
15366
/* Locate the "peer" PCI function of a dual-port device (e.g. 5704):
 * scan the other functions in the same slot for a device that is not
 * us.  Returns tp->pdev itself when no peer exists (single-port mode).
 * The returned pointer is NOT reference-counted; see comment below.
 */
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		/* Drop the reference taken by pci_get_slot(); NULL is a
		 * no-op for pci_dev_put().
		 */
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
15394
15395 static void __devinit tg3_init_coal(struct tg3 *tp)
15396 {
15397         struct ethtool_coalesce *ec = &tp->coal;
15398
15399         memset(ec, 0, sizeof(*ec));
15400         ec->cmd = ETHTOOL_GCOALESCE;
15401         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15402         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15403         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15404         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15405         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15406         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15407         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15408         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15409         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15410
15411         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15412                                  HOSTCC_MODE_CLRTICK_TXBD)) {
15413                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15414                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15415                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15416                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15417         }
15418
15419         if (tg3_flag(tp, 5705_PLUS)) {
15420                 ec->rx_coalesce_usecs_irq = 0;
15421                 ec->tx_coalesce_usecs_irq = 0;
15422                 ec->stats_block_coalesce_usecs = 0;
15423         }
15424 }
15425
/* PCI probe entry point: bring the device to D0, map its registers,
 * discover its capabilities, configure DMA masks and offload features,
 * set up per-vector NAPI mailboxes, and register the net_device.
 * Returns 0 on success or a negative errno; on failure every resource
 * acquired so far is released via the goto-cleanup ladder at the end.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	netdev_features_t features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find Power Management capability, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	err = pci_set_power_state(pdev, PCI_D0);
	if (err) {
		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
		goto err_out_free_res;
	}

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_power_down;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	/* Module parameter tg3_debug overrides the default log mask. */
	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	/* Map BAR 0: the main register window. */
	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	/* Devices with an Application Processing Engine also need BAR 2
	 * (the APE register window) mapped.
	 */
	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	/* Probe chip revision, flags and quirks from config space/NVRAM. */
	err = tg3_get_invariants(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	/* Fall back to a 32-bit mask if the wide mask was refused. */
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
	 * loopback for the remaining devices.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;

	/* 5705 A1 without TSO on a slow bus cannot keep up with a full
	 * rx ring; cap the pending rx count at 63.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	/* Assign interrupt, consumer and producer mailbox addresses to
	 * each NAPI context.
	 */
	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		/* The first five interrupt mailboxes are 8 bytes apart;
		 * later ones are packed at 4-byte stride.
		 */
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		/* Producer mailboxes alternate around an 8-byte stride. */
		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (tg3_flag(tp, 5717_PLUS)) {
		/* Resume a low-power mode */
		tg3_frob_aux_power(tp, false);
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tp->pci_chip_rev_id,
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	/* Snapshot config space for later restore (e.g. error recovery). */
	pci_save_state(pdev);

	return 0;

	/* Error unwind: each label releases everything acquired after
	 * the corresponding point in the probe sequence above.
	 */
err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_power_down:
	pci_set_power_state(pdev, PCI_D3hot);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
15802
15803 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15804 {
15805         struct net_device *dev = pci_get_drvdata(pdev);
15806
15807         if (dev) {
15808                 struct tg3 *tp = netdev_priv(dev);
15809
15810                 if (tp->fw)
15811                         release_firmware(tp->fw);
15812
15813                 tg3_reset_task_cancel(tp);
15814
15815                 if (tg3_flag(tp, USE_PHYLIB)) {
15816                         tg3_phy_fini(tp);
15817                         tg3_mdio_fini(tp);
15818                 }
15819
15820                 unregister_netdev(dev);
15821                 if (tp->aperegs) {
15822                         iounmap(tp->aperegs);
15823                         tp->aperegs = NULL;
15824                 }
15825                 if (tp->regs) {
15826                         iounmap(tp->regs);
15827                         tp->regs = NULL;
15828                 }
15829                 free_netdev(dev);
15830                 pci_release_regions(pdev);
15831                 pci_disable_device(pdev);
15832                 pci_set_drvdata(pdev, NULL);
15833         }
15834 }
15835
#ifdef CONFIG_PM_SLEEP
/* System suspend: quiesce the interface, halt the hardware, and let
 * tg3_power_down_prepare() arm the chip for low power.  On failure the
 * device is restarted so the system keeps a working NIC.
 */
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Nothing to quiesce if the interface is down. */
	if (!netif_running(dev))
		return 0;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		/* Suspend failed: bring the hardware and the interface
		 * back up so the device remains usable.
		 */
		int err2;

		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}

/* System resume: re-attach the interface and restart the hardware
 * state torn down by tg3_suspend().
 */
static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* The interface was down at suspend time; nothing to restore. */
	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	/* PHY start must happen outside the full lock. */
	if (!err)
		tg3_phy_start(tp);

	return err;
}

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
15932
/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.  Quiesces the interface and
 * returns whether the core should reset the slot or disconnect
 * the device.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	/* rtnl lock serializes us against open/close/ethtool paths. */
	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure)
		err = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return err;
}
15982
/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	/* Restore the config space saved at probe time, then re-save it
	 * so a later error recovery starts from this known-good state.
	 */
	pci_restore_state(pdev);
	pci_save_state(pdev);

	/* An interface that was down needs no hardware bring-up. */
	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	rtnl_unlock();

	return rc;
}
16026
/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.  Restarts the
 * hardware and re-attaches the interface.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	tg3_full_unlock(tp);
	if (err) {
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

	/* PHY start must happen outside the full lock. */
	tg3_phy_start(tp);

done:
	rtnl_unlock();
}
16066
/* PCI error recovery callbacks (AER): quiesce, slot reset, resume. */
static struct pci_error_handlers tg3_err_handler = {
	.error_detected = tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};

/* Main PCI driver descriptor tying probe/remove/PM/error handling
 * to the tg3 device id table.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.err_handler	= &tg3_err_handler,
	.driver.pm	= TG3_PM_OPS,
};
16081
/* Module load: register the PCI driver; probing happens per-device. */
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}

/* Module unload: unregister the driver, triggering remove for each
 * bound device.
 */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);