git.kernelconcepts.de Git - karo-tx-linux.git/blob - drivers/net/ethernet/broadcom/tg3.c
tg3: Fix copper autoneg adv checks
[karo-tx-linux.git] / drivers / net / ethernet / broadcom / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2011 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47
48 #include <net/checksum.h>
49 #include <net/ip.h>
50
51 #include <asm/system.h>
52 #include <linux/io.h>
53 #include <asm/byteorder.h>
54 #include <linux/uaccess.h>
55
56 #ifdef CONFIG_SPARC
57 #include <asm/idprom.h>
58 #include <asm/prom.h>
59 #endif
60
61 #define BAR_0   0
62 #define BAR_2   2
63
64 #include "tg3.h"
65
66 /* Functions & macros to verify TG3_FLAGS types */
67
/* Test whether @flag is set in the driver flag bitmap @bits.
 * Used only through the tg3_flag() wrapper macro so the flag name is
 * type-checked against enum TG3_FLAGS at compile time.
 */
static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}
72
/* Atomically set @flag in the driver flag bitmap @bits
 * (see the tg3_flag_set() wrapper macro).
 */
static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}
77
/* Atomically clear @flag in the driver flag bitmap @bits
 * (see the tg3_flag_clear() wrapper macro).
 */
static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}
82
83 #define tg3_flag(tp, flag)                              \
84         _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
85 #define tg3_flag_set(tp, flag)                          \
86         _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
87 #define tg3_flag_clear(tp, flag)                        \
88         _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
89
90 #define DRV_MODULE_NAME         "tg3"
91 #define TG3_MAJ_NUM                     3
92 #define TG3_MIN_NUM                     122
93 #define DRV_MODULE_VERSION      \
94         __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
95 #define DRV_MODULE_RELDATE      "December 7, 2011"
96
97 #define RESET_KIND_SHUTDOWN     0
98 #define RESET_KIND_INIT         1
99 #define RESET_KIND_SUSPEND      2
100
101 #define TG3_DEF_RX_MODE         0
102 #define TG3_DEF_TX_MODE         0
103 #define TG3_DEF_MSG_ENABLE        \
104         (NETIF_MSG_DRV          | \
105          NETIF_MSG_PROBE        | \
106          NETIF_MSG_LINK         | \
107          NETIF_MSG_TIMER        | \
108          NETIF_MSG_IFDOWN       | \
109          NETIF_MSG_IFUP         | \
110          NETIF_MSG_RX_ERR       | \
111          NETIF_MSG_TX_ERR)
112
113 #define TG3_GRC_LCLCTL_PWRSW_DELAY      100
114
115 /* length of time before we decide the hardware is borked,
116  * and dev->tx_timeout() should be called to fix the problem
117  */
118
119 #define TG3_TX_TIMEOUT                  (5 * HZ)
120
121 /* hardware minimum and maximum for a single frame's data payload */
122 #define TG3_MIN_MTU                     60
123 #define TG3_MAX_MTU(tp) \
124         (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
125
126 /* These numbers seem to be hard coded in the NIC firmware somehow.
127  * You can't change the ring sizes, but you can change where you place
128  * them in the NIC onboard memory.
129  */
130 #define TG3_RX_STD_RING_SIZE(tp) \
131         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
132          TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
133 #define TG3_DEF_RX_RING_PENDING         200
134 #define TG3_RX_JMB_RING_SIZE(tp) \
135         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
136          TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
137 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
138
139 /* Do not place this n-ring entries value into the tp struct itself,
140  * we really want to expose these constants to GCC so that modulo et
141  * al.  operations are done with shifts and masks instead of with
142  * hw multiply/modulo instructions.  Another solution would be to
143  * replace things like '% foo' with '& (foo - 1)'.
144  */
145
146 #define TG3_TX_RING_SIZE                512
147 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
148
149 #define TG3_RX_STD_RING_BYTES(tp) \
150         (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
151 #define TG3_RX_JMB_RING_BYTES(tp) \
152         (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
153 #define TG3_RX_RCB_RING_BYTES(tp) \
154         (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
155 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
156                                  TG3_TX_RING_SIZE)
157 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
158
159 #define TG3_DMA_BYTE_ENAB               64
160
161 #define TG3_RX_STD_DMA_SZ               1536
162 #define TG3_RX_JMB_DMA_SZ               9046
163
164 #define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)
165
166 #define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
167 #define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
168
169 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
170         (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
171
172 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
173         (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
174
175 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
176  * that are at least dword aligned when used in PCIX mode.  The driver
177  * works around this bug by double copying the packet.  This workaround
178  * is built into the normal double copy length check for efficiency.
179  *
180  * However, the double copy is only necessary on those architectures
181  * where unaligned memory accesses are inefficient.  For those architectures
182  * where unaligned memory accesses incur little penalty, we can reintegrate
183  * the 5701 in the normal rx path.  Doing so saves a device structure
184  * dereference by hardcoding the double copy threshold in place.
185  */
186 #define TG3_RX_COPY_THRESHOLD           256
187 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
188         #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
189 #else
190         #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
191 #endif
192
193 #if (NET_IP_ALIGN != 0)
194 #define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
195 #else
196 #define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
197 #endif
198
199 /* minimum number of free TX descriptors required to wake up TX process */
200 #define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
201 #define TG3_TX_BD_DMA_MAX_2K            2048
202 #define TG3_TX_BD_DMA_MAX_4K            4096
203
204 #define TG3_RAW_IP_ALIGN 2
205
206 #define TG3_FW_UPDATE_TIMEOUT_SEC       5
207
208 #define FIRMWARE_TG3            "tigon/tg3.bin"
209 #define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
210 #define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"
211
212 static char version[] __devinitdata =
213         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
214
215 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
216 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
217 MODULE_LICENSE("GPL");
218 MODULE_VERSION(DRV_MODULE_VERSION);
219 MODULE_FIRMWARE(FIRMWARE_TG3);
220 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
221 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
222
223 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
224 module_param(tg3_debug, int, 0);
225 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
226
/* PCI vendor/device IDs this driver binds to.  Terminated by the
 * empty sentinel entry; exported via MODULE_DEVICE_TABLE below.
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};
311
312 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
313
/* Names reported to ethtool -S, one per statistics counter.  The order
 * here must match the order in which the driver fills the stats buffer
 * (TG3_NUM_STATS below is derived from this table's size).
 */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};
396
397 #define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
398
399
/* Names reported to ethtool for the self-test results.  Order must
 * match the order the tests are run in (TG3_NUM_TEST is derived from
 * this table's size).
 */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        { "nvram test        (online) " },
        { "link test         (online) " },
        { "register test     (offline)" },
        { "memory test       (offline)" },
        { "mac loopback test (offline)" },
        { "phy loopback test (offline)" },
        { "ext loopback test (offline)" },
        { "interrupt test    (offline)" },
};
412
413 #define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)
414
415
/* Plain MMIO register write at offset @off in BAR 0.  Posted: there is
 * no read-back, so the write may still be in flight when this returns.
 */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}
420
/* Plain MMIO register read at offset @off in BAR 0. */
static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}
425
/* MMIO write into the APE (management processor) register region. */
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}
430
/* MMIO read from the APE (management processor) register region. */
static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}
435
/* Write chip register @off through PCI config space instead of MMIO
 * (indirect access mode).  The base-address/data config register pair
 * is shared state, so indirect_lock serializes the two-step sequence.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
445
/* MMIO register write followed by a read-back of the same register to
 * flush the posted write to the device before returning.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}
451
/* Read chip register @off through PCI config space (indirect access
 * mode).  indirect_lock serializes use of the shared base-address/data
 * config register pair.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
463
/* Write mailbox register @off while the chip is in indirect access
 * mode.  Two mailboxes have dedicated PCI config-space aliases and are
 * written directly; all others go through the base-address/data pair
 * under indirect_lock (note the 0x5600 mailbox window offset).
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}
493
/* Read mailbox register @off in indirect access mode via the
 * base-address/data config register pair (0x5600 is the mailbox
 * window offset); serialized by indirect_lock.
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
505
506 /* usec_wait specifies the wait time in usec when writing to certain registers
507  * where it is unsafe to read back the register without some delay.
508  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
509  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
510  */
511 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
512 {
513         if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
514                 /* Non-posted methods */
515                 tp->write32(tp, off, val);
516         else {
517                 /* Posted method */
518                 tg3_write32(tp, off, val);
519                 if (usec_wait)
520                         udelay(usec_wait);
521                 tp->read32(tp, off);
522         }
523         /* Wait again after the read for the posted method to guarantee that
524          * the wait time is met.
525          */
526         if (usec_wait)
527                 udelay(usec_wait);
528 }
529
/* Write a mailbox register and, unless the chip configuration forbids
 * it (write-reorder or ICH workaround flags), read it back to flush
 * the posted write.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}
536
/* Write a TX mailbox.  Chips with the TXD mailbox hardware bug need
 * the value written twice; chips with write-reordering need a
 * read-back to flush the posted write.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER))
                readl(mbox);
}
546
/* 5906 mailbox read: mailboxes live in the GRC mailbox region on
 * this chip, so offset into GRCMBOX_BASE.
 */
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}
551
/* 5906 mailbox write: mailboxes live in the GRC mailbox region on
 * this chip, so offset into GRCMBOX_BASE.
 */
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}
556
557 #define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
558 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
559 #define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
560 #define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
561 #define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)
562
563 #define tw32(reg, val)                  tp->write32(tp, reg, val)
564 #define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
565 #define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
566 #define tr32(reg)                       tp->read32(tp, reg)
567
/* Write @val into NIC on-board SRAM at offset @off through the memory
 * window.  Silently ignored on 5906 for the stats-block range, which
 * that chip does not expose.  Uses either the config-space window or
 * MMIO depending on SRAM_USE_CONFIG; indirect_lock serializes the
 * window base-address/data sequence.  The window base is always
 * restored to zero afterwards.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
592
/* Read NIC on-board SRAM at offset @off into *@val through the memory
 * window.  Returns 0 in *@val on 5906 for the stats-block range, which
 * that chip does not expose.  Mirror of tg3_write_mem(): config-space
 * or MMIO window under indirect_lock, window base restored to zero.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
619
620 static void tg3_ape_lock_init(struct tg3 *tp)
621 {
622         int i;
623         u32 regbase, bit;
624
625         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
626                 regbase = TG3_APE_LOCK_GRANT;
627         else
628                 regbase = TG3_APE_PER_LOCK_GRANT;
629
630         /* Make sure the driver hasn't any stale locks. */
631         for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
632                 switch (i) {
633                 case TG3_APE_LOCK_PHY0:
634                 case TG3_APE_LOCK_PHY1:
635                 case TG3_APE_LOCK_PHY2:
636                 case TG3_APE_LOCK_PHY3:
637                         bit = APE_LOCK_GRANT_DRIVER;
638                         break;
639                 default:
640                         if (!tp->pci_fn)
641                                 bit = APE_LOCK_GRANT_DRIVER;
642                         else
643                                 bit = 1 << tp->pci_fn;
644                 }
645                 tg3_ape_write32(tp, regbase + 4 * i, bit);
646         }
647
648 }
649
/* Acquire APE hardware lock @locknum, spinning for up to ~1 ms.
 * Returns 0 on success (or if APE is absent / lock not applicable),
 * -EBUSY if the lock could not be obtained, -EINVAL for an unknown
 * lock number.  Locks are requested by writing this function's bit to
 * the REQ register and confirmed when the GRANT register shows only
 * that bit.
 */
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                /* 5761 has no GPIO lock; otherwise treat like GRC/MEM. */
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return 0;
                /* fallthrough */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        default:
                return -EINVAL;
        }

        /* 5761 uses the legacy lock register block. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}
702
/* Release APE hardware lock @locknum by writing our grant bit back to
 * the GRANT register.  No-op if APE is absent, the lock number is
 * unknown, or (for GPIO on 5761) the lock does not exist.
 */
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                /* 5761 has no GPIO lock; otherwise treat like GRC/MEM. */
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return;
                /* fallthrough */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        default:
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
732
/* Post @event to the APE firmware.  Silently returns if the firmware
 * is NCSI (no APE events), not signed on, or not ready.  Waits up to
 * ~1 ms for any previous event to be consumed, then writes the event
 * (with the PENDING bit) under the MEM lock and rings the doorbell.
 */
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int i;
        u32 apedata;

        /* NCSI does not support APE events */
        if (tg3_flag(tp, APE_HAS_NCSI))
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        for (i = 0; i < 10; i++) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                /* Previous event consumed: queue ours while holding the lock. */
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                                        event | APE_EVENT_STATUS_EVENT_PENDING);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(100);
        }

        /* Ring the doorbell only if we managed to queue the event. */
        if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
772
/* Inform the APE management firmware of a driver state transition
 * (RESET_KIND_INIT / _SHUTDOWN / _SUSPEND).  No-op when the APE is
 * not enabled or the kind is unrecognized.
 */
773 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
774 {
775         u32 event;
776         u32 apedata;
777
778         if (!tg3_flag(tp, ENABLE_APE))
779                 return;
780
781         switch (kind) {
782         case RESET_KIND_INIT:
                /* Publish host segment signature/length, bump the init
                 * count and identify this driver to the APE.
                 */
783                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
784                                 APE_HOST_SEG_SIG_MAGIC);
785                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
786                                 APE_HOST_SEG_LEN_MAGIC);
787                 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
788                 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
789                 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
790                         APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
791                 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
792                                 APE_HOST_BEHAV_NO_PHYLOCK);
793                 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
794                                     TG3_APE_HOST_DRVR_STATE_START);
795
796                 event = APE_EVENT_STATUS_STATE_START;
797                 break;
798         case RESET_KIND_SHUTDOWN:
799                 /* With the interface we are currently using,
800                  * APE does not track driver state.  Wiping
801                  * out the HOST SEGMENT SIGNATURE forces
802                  * the APE to assume OS absent status.
803                  */
804                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
805
                    /* Advertise WoL state so the APE keeps the link alive. */
806                 if (device_may_wakeup(&tp->pdev->dev) &&
807                     tg3_flag(tp, WOL_ENABLE)) {
808                         tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
809                                             TG3_APE_HOST_WOL_SPEED_AUTO);
810                         apedata = TG3_APE_HOST_DRVR_STATE_WOL;
811                 } else
812                         apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
813
814                 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
815
816                 event = APE_EVENT_STATUS_STATE_UNLOAD;
817                 break;
818         case RESET_KIND_SUSPEND:
819                 event = APE_EVENT_STATUS_STATE_SUSPEND;
820                 break;
821         default:
822                 return;
823         }
824
825         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
826
827         tg3_ape_send_event(tp, event);
828 }
829
/* Mask PCI interrupts at the chip level and write 1 into every
 * vector's interrupt mailbox, which blocks further interrupt
 * generation on that vector until it is re-armed.
 */
830 static void tg3_disable_ints(struct tg3 *tp)
831 {
832         int i;
833
834         tw32(TG3PCI_MISC_HOST_CTRL,
835              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
836         for (i = 0; i < tp->irq_max; i++)
837                 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
838 }
839
/* Unmask chip interrupts and re-arm every active vector's mailbox
 * with its last processed tag.  Also primes tp->coal_now and forces
 * an initial interrupt if status was already updated.
 */
840 static void tg3_enable_ints(struct tg3 *tp)
841 {
842         int i;
843
            /* Publish irq_sync = 0 before unmasking (paired with readers). */
844         tp->irq_sync = 0;
845         wmb();
846
847         tw32(TG3PCI_MISC_HOST_CTRL,
848              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
849
850         tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
851         for (i = 0; i < tp->irq_cnt; i++) {
852                 struct tg3_napi *tnapi = &tp->napi[i];
853
854                 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                    /* 1-shot MSI needs the mailbox written twice to re-arm. */
855                 if (tg3_flag(tp, 1SHOT_MSI))
856                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
857
858                 tp->coal_now |= tnapi->coal_now;
859         }
860
861         /* Force an initial interrupt */
862         if (!tg3_flag(tp, TAGGED_STATUS) &&
863             (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
864                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
865         else
866                 tw32(HOSTCC_MODE, tp->coal_now);
867
868         tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
869 }
870
871 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
872 {
873         struct tg3 *tp = tnapi->tp;
874         struct tg3_hw_status *sblk = tnapi->hw_status;
875         unsigned int work_exists = 0;
876
877         /* check for phy events */
878         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
879                 if (sblk->status & SD_STATUS_LINK_CHG)
880                         work_exists = 1;
881         }
882         /* check for RX/TX work to do */
883         if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
884             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
885                 work_exists = 1;
886
887         return work_exists;
888 }
889
890 /* tg3_int_reenable
891  *  similar to tg3_enable_ints, but it accurately determines whether there
892  *  is new work pending and can return without flushing the PIO write
893  *  which reenables interrupts
894  */
895 static void tg3_int_reenable(struct tg3_napi *tnapi)
896 {
897         struct tg3 *tp = tnapi->tp;
898
            /* Re-arm the vector; mmiowb() orders this MMIO write before any
             * subsequent writes from another CPU that takes the lock next.
             */
899         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
900         mmiowb();
901
902         /* When doing tagged status, this work check is unnecessary.
903          * The last_tag we write above tells the chip which piece of
904          * work we've completed.
905          */
906         if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
907                 tw32(HOSTCC_MODE, tp->coalesce_mode |
908                      HOSTCC_MODE_ENABLE | tnapi->coal_now);
909 }
910
/* Switch the core clock source, preserving only the CLKRUN bits and
 * the low clock-select field.  Not needed (and skipped) on CPMU or
 * 5780-class parts.  The intermediate ALTCLK writes on pre-5705
 * chips follow the hardware's required transition sequence.
 */
911 static void tg3_switch_clocks(struct tg3 *tp)
912 {
913         u32 clock_ctrl;
914         u32 orig_clock_ctrl;
915
916         if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
917                 return;
918
919         clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
920
921         orig_clock_ctrl = clock_ctrl;
922         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
923                        CLOCK_CTRL_CLKRUN_OENABLE |
924                        0x1f);
925         tp->pci_clock_ctrl = clock_ctrl;
926
927         if (tg3_flag(tp, 5705_PLUS)) {
928                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
929                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
930                                     clock_ctrl | CLOCK_CTRL_625_CORE, 40);
931                 }
932         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                    /* Step down via ALTCLK in two stages per chip errata. */
933                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
934                             clock_ctrl |
935                             (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
936                             40);
937                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
938                             clock_ctrl | (CLOCK_CTRL_ALTCLK),
939                             40);
940         }
941         tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
942 }
943
944 #define PHY_BUSY_LOOPS  5000
945
/* Read PHY register @reg over the MAC's MII management interface into
 * *val.  Temporarily disables hardware auto-polling if active, polls
 * MI_COM_BUSY for up to PHY_BUSY_LOOPS iterations, then restores the
 * previous MI mode.  Returns 0 on success, -EBUSY on timeout (with
 * *val left as 0).
 */
946 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
947 {
948         u32 frame_val;
949         unsigned int loops;
950         int ret;
951
            /* Auto-poll and manual MDIO access can't share the bus. */
952         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
953                 tw32_f(MAC_MI_MODE,
954                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
955                 udelay(80);
956         }
957
958         *val = 0x0;
959
960         frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
961                       MI_COM_PHY_ADDR_MASK);
962         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
963                       MI_COM_REG_ADDR_MASK);
964         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
965
966         tw32_f(MAC_MI_COM, frame_val);
967
968         loops = PHY_BUSY_LOOPS;
969         while (loops != 0) {
970                 udelay(10);
971                 frame_val = tr32(MAC_MI_COM);
972
973                 if ((frame_val & MI_COM_BUSY) == 0) {
                            /* Re-read after a settle delay to latch the data. */
974                         udelay(5);
975                         frame_val = tr32(MAC_MI_COM);
976                         break;
977                 }
978                 loops -= 1;
979         }
980
981         ret = -EBUSY;
982         if (loops != 0) {
983                 *val = frame_val & MI_COM_DATA_MASK;
984                 ret = 0;
985         }
986
            /* Restore auto-polling if it was enabled on entry. */
987         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
988                 tw32_f(MAC_MI_MODE, tp->mi_mode);
989                 udelay(80);
990         }
991
992         return ret;
993 }
994
/* Write @val to PHY register @reg over the MII management interface.
 * Mirrors tg3_readphy()'s auto-poll suspend/restore and busy-poll
 * handling.  Returns 0 on success, -EBUSY on timeout.
 */
995 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
996 {
997         u32 frame_val;
998         unsigned int loops;
999         int ret;
1000
            /* FET-style PHYs have no 1000BASE-T control or aux-control
             * registers; pretend the write succeeded.
             */
1001         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1002             (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1003                 return 0;
1004
             /* Auto-poll and manual MDIO access can't share the bus. */
1005         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1006                 tw32_f(MAC_MI_MODE,
1007                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1008                 udelay(80);
1009         }
1010
1011         frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1012                       MI_COM_PHY_ADDR_MASK);
1013         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1014                       MI_COM_REG_ADDR_MASK);
1015         frame_val |= (val & MI_COM_DATA_MASK);
1016         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1017
1018         tw32_f(MAC_MI_COM, frame_val);
1019
1020         loops = PHY_BUSY_LOOPS;
1021         while (loops != 0) {
1022                 udelay(10);
1023                 frame_val = tr32(MAC_MI_COM);
1024                 if ((frame_val & MI_COM_BUSY) == 0) {
1025                         udelay(5);
1026                         frame_val = tr32(MAC_MI_COM);
1027                         break;
1028                 }
1029                 loops -= 1;
1030         }
1031
1032         ret = -EBUSY;
1033         if (loops != 0)
1034                 ret = 0;
1035
             /* Restore auto-polling if it was enabled on entry. */
1036         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1037                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1038                 udelay(80);
1039         }
1040
1041         return ret;
1042 }
1043
1044 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1045 {
1046         int err;
1047
1048         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1049         if (err)
1050                 goto done;
1051
1052         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1053         if (err)
1054                 goto done;
1055
1056         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1057                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1058         if (err)
1059                 goto done;
1060
1061         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1062
1063 done:
1064         return err;
1065 }
1066
1067 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1068 {
1069         int err;
1070
1071         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1072         if (err)
1073                 goto done;
1074
1075         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1076         if (err)
1077                 goto done;
1078
1079         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1080                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1081         if (err)
1082                 goto done;
1083
1084         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1085
1086 done:
1087         return err;
1088 }
1089
1090 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1091 {
1092         int err;
1093
1094         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1095         if (!err)
1096                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1097
1098         return err;
1099 }
1100
1101 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1102 {
1103         int err;
1104
1105         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1106         if (!err)
1107                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1108
1109         return err;
1110 }
1111
1112 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1113 {
1114         int err;
1115
1116         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1117                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1118                            MII_TG3_AUXCTL_SHDWSEL_MISC);
1119         if (!err)
1120                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1121
1122         return err;
1123 }
1124
1125 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1126 {
1127         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1128                 set |= MII_TG3_AUXCTL_MISC_WREN;
1129
1130         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1131 }
1132
/* Enable the PHY's SMDSP clock (plus TX 6dB coding) via the aux
 * control shadow register.  Expands to an expression returning the
 * tg3_phy_auxctl_write() result.
 */
#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

/* Disable the SMDSP clock, leaving TX 6dB coding set.  Fix: the
 * original definition ended with a stray semicolon, so the macro
 * could not be used as an expression and broke unbraced if/else
 * callers; it now expands to an expression like its ENABLE twin.
 */
#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)
1141
1142 static int tg3_bmcr_reset(struct tg3 *tp)
1143 {
1144         u32 phy_control;
1145         int limit, err;
1146
1147         /* OK, reset it, and poll the BMCR_RESET bit until it
1148          * clears or we time out.
1149          */
1150         phy_control = BMCR_RESET;
1151         err = tg3_writephy(tp, MII_BMCR, phy_control);
1152         if (err != 0)
1153                 return -EBUSY;
1154
1155         limit = 5000;
1156         while (limit--) {
1157                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1158                 if (err != 0)
1159                         return -EBUSY;
1160
1161                 if ((phy_control & BMCR_RESET) == 0) {
1162                         udelay(40);
1163                         break;
1164                 }
1165                 udelay(10);
1166         }
1167         if (limit < 0)
1168                 return -EBUSY;
1169
1170         return 0;
1171 }
1172
1173 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1174 {
1175         struct tg3 *tp = bp->priv;
1176         u32 val;
1177
1178         spin_lock_bh(&tp->lock);
1179
1180         if (tg3_readphy(tp, reg, &val))
1181                 val = -EIO;
1182
1183         spin_unlock_bh(&tp->lock);
1184
1185         return val;
1186 }
1187
1188 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1189 {
1190         struct tg3 *tp = bp->priv;
1191         u32 ret = 0;
1192
1193         spin_lock_bh(&tp->lock);
1194
1195         if (tg3_writephy(tp, reg, val))
1196                 ret = -EIO;
1197
1198         spin_unlock_bh(&tp->lock);
1199
1200         return ret;
1201 }
1202
/* mii_bus ->reset callback.  The MAC's MDIO interface needs no
 * per-bus reset, so this intentionally does nothing.
 */
1203 static int tg3_mdio_reset(struct mii_bus *bp)
1204 {
1205         return 0;
1206 }
1207
/* Configure the 5785 MAC's PHY interface (LED modes, RGMII in-band
 * signalling, clock timeouts) to match the attached PHY model and
 * interface mode.  Returns early for unrecognized PHYs and for
 * non-RGMII interfaces after the minimal setup.
 */
1208 static void tg3_mdio_config_5785(struct tg3 *tp)
1209 {
1210         u32 val;
1211         struct phy_device *phydev;
1212
1213         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
             /* Pick the LED mode matching the attached PHY model. */
1214         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1215         case PHY_ID_BCM50610:
1216         case PHY_ID_BCM50610M:
1217                 val = MAC_PHYCFG2_50610_LED_MODES;
1218                 break;
1219         case PHY_ID_BCMAC131:
1220                 val = MAC_PHYCFG2_AC131_LED_MODES;
1221                 break;
1222         case PHY_ID_RTL8211C:
1223                 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1224                 break;
1225         case PHY_ID_RTL8201E:
1226                 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1227                 break;
1228         default:
1229                 return;
1230         }
1231
             /* Non-RGMII interfaces only need LED modes and clock timeouts. */
1232         if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1233                 tw32(MAC_PHYCFG2, val);
1234
1235                 val = tr32(MAC_PHYCFG1);
1236                 val &= ~(MAC_PHYCFG1_RGMII_INT |
1237                          MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1238                 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1239                 tw32(MAC_PHYCFG1, val);
1240
1241                 return;
1242         }
1243
             /* RGMII with in-band status: enable the full in-band mask set. */
1244         if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1245                 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1246                        MAC_PHYCFG2_FMODE_MASK_MASK |
1247                        MAC_PHYCFG2_GMODE_MASK_MASK |
1248                        MAC_PHYCFG2_ACT_MASK_MASK   |
1249                        MAC_PHYCFG2_QUAL_MASK_MASK |
1250                        MAC_PHYCFG2_INBAND_ENABLE;
1251
1252         tw32(MAC_PHYCFG2, val);
1253
1254         val = tr32(MAC_PHYCFG1);
1255         val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1256                  MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1257         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1258                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1259                         val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1260                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1261                         val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1262         }
1263         val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1264                MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1265         tw32(MAC_PHYCFG1, val);
1266
             /* Mirror the in-band RX/TX selections into the ext RGMII mode. */
1267         val = tr32(MAC_EXT_RGMII_MODE);
1268         val &= ~(MAC_RGMII_MODE_RX_INT_B |
1269                  MAC_RGMII_MODE_RX_QUALITY |
1270                  MAC_RGMII_MODE_RX_ACTIVITY |
1271                  MAC_RGMII_MODE_RX_ENG_DET |
1272                  MAC_RGMII_MODE_TX_ENABLE |
1273                  MAC_RGMII_MODE_TX_LOWPWR |
1274                  MAC_RGMII_MODE_TX_RESET);
1275         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1276                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1277                         val |= MAC_RGMII_MODE_RX_INT_B |
1278                                MAC_RGMII_MODE_RX_QUALITY |
1279                                MAC_RGMII_MODE_RX_ACTIVITY |
1280                                MAC_RGMII_MODE_RX_ENG_DET;
1281                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1282                         val |= MAC_RGMII_MODE_TX_ENABLE |
1283                                MAC_RGMII_MODE_TX_LOWPWR |
1284                                MAC_RGMII_MODE_TX_RESET;
1285         }
1286         tw32(MAC_EXT_RGMII_MODE, val);
1287 }
1288
/* Hand the MDIO bus back to software control by turning off hardware
 * MII auto-polling, then re-apply 5785 PHY interface config if the
 * mdio bus has already been registered.
 */
1289 static void tg3_mdio_start(struct tg3 *tp)
1290 {
1291         tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1292         tw32_f(MAC_MI_MODE, tp->mi_mode);
1293         udelay(80);
1294
1295         if (tg3_flag(tp, MDIOBUS_INITED) &&
1296             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1297                 tg3_mdio_config_5785(tp);
1298 }
1299
/* Determine the PHY address, start the MDIO interface and — when
 * phylib is in use — allocate and register an mii_bus, then apply
 * per-PHY-model interface quirks.  Returns 0 on success or a
 * negative errno (-ENOMEM, -ENODEV, or the mdiobus_register error).
 */
1300 static int tg3_mdio_init(struct tg3 *tp)
1301 {
1302         int i;
1303         u32 reg;
1304         struct phy_device *phydev;
1305
1306         if (tg3_flag(tp, 5717_PLUS)) {
1307                 u32 is_serdes;
1308
                     /* 5717+ parts: PHY address derives from the PCI function,
                      * with serdes PHYs offset by 7.
                      */
1309                 tp->phy_addr = tp->pci_fn + 1;
1310
1311                 if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
1312                         is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1313                 else
1314                         is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1315                                     TG3_CPMU_PHY_STRAP_IS_SERDES;
1316                 if (is_serdes)
1317                         tp->phy_addr += 7;
1318         } else
1319                 tp->phy_addr = TG3_PHY_MII_ADDR;
1320
1321         tg3_mdio_start(tp);
1322
1323         if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1324                 return 0;
1325
1326         tp->mdio_bus = mdiobus_alloc();
1327         if (tp->mdio_bus == NULL)
1328                 return -ENOMEM;
1329
1330         tp->mdio_bus->name     = "tg3 mdio bus";
1331         snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1332                  (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1333         tp->mdio_bus->priv     = tp;
1334         tp->mdio_bus->parent   = &tp->pdev->dev;
1335         tp->mdio_bus->read     = &tg3_mdio_read;
1336         tp->mdio_bus->write    = &tg3_mdio_write;
1337         tp->mdio_bus->reset    = &tg3_mdio_reset;
             /* Only probe the single expected PHY address. */
1338         tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1339         tp->mdio_bus->irq      = &tp->mdio_irq[0];
1340
1341         for (i = 0; i < PHY_MAX_ADDR; i++)
1342                 tp->mdio_bus->irq[i] = PHY_POLL;
1343
1344         /* The bus registration will look for all the PHYs on the mdio bus.
1345          * Unfortunately, it does not ensure the PHY is powered up before
1346          * accessing the PHY ID registers.  A chip reset is the
1347          * quickest way to bring the device back to an operational state..
1348          */
1349         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1350                 tg3_bmcr_reset(tp)
1351
1352         i = mdiobus_register(tp->mdio_bus);
1353         if (i) {
1354                 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1355                 mdiobus_free(tp->mdio_bus);
1356                 return i;
1357         }
1358
1359         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1360
1361         if (!phydev || !phydev->drv) {
1362                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1363                 mdiobus_unregister(tp->mdio_bus);
1364                 mdiobus_free(tp->mdio_bus);
1365                 return -ENODEV;
1366         }
1367
             /* Per-model interface mode and quirk flags. */
1368         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1369         case PHY_ID_BCM57780:
1370                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1371                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1372                 break;
1373         case PHY_ID_BCM50610:
1374         case PHY_ID_BCM50610M:
1375                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1376                                      PHY_BRCM_RX_REFCLK_UNUSED |
1377                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1378                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1379                 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1380                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1381                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1382                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1383                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1384                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1385                 /* fallthru */
1386         case PHY_ID_RTL8211C:
1387                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1388                 break;
1389         case PHY_ID_RTL8201E:
1390         case PHY_ID_BCMAC131:
1391                 phydev->interface = PHY_INTERFACE_MODE_MII;
1392                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1393                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1394                 break;
1395         }
1396
1397         tg3_flag_set(tp, MDIOBUS_INITED);
1398
1399         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1400                 tg3_mdio_config_5785(tp);
1401
1402         return 0;
1403 }
1404
1405 static void tg3_mdio_fini(struct tg3 *tp)
1406 {
1407         if (tg3_flag(tp, MDIOBUS_INITED)) {
1408                 tg3_flag_clear(tp, MDIOBUS_INITED);
1409                 mdiobus_unregister(tp->mdio_bus);
1410                 mdiobus_free(tp->mdio_bus);
1411         }
1412 }
1413
1414 /* tp->lock is held. */
1415 static inline void tg3_generate_fw_event(struct tg3 *tp)
1416 {
1417         u32 val;
1418
1419         val = tr32(GRC_RX_CPU_EVENT);
1420         val |= GRC_RX_CPU_DRIVER_EVENT;
1421         tw32_f(GRC_RX_CPU_EVENT, val);
1422
1423         tp->last_event_jiffies = jiffies;
1424 }
1425
1426 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1427
1428 /* tp->lock is held. */
/* Wait (bounded by TG3_FW_EVENT_TIMEOUT_USEC) for the firmware to
 * clear GRC_RX_CPU_DRIVER_EVENT, acknowledging the previous event
 * raised by tg3_generate_fw_event().  Skips the wait entirely when
 * enough wall time has already elapsed.
 */
1429 static void tg3_wait_for_event_ack(struct tg3 *tp)
1430 {
1431         int i;
1432         unsigned int delay_cnt;
1433         long time_remain;
1434
1435         /* If enough time has passed, no wait is necessary. */
1436         time_remain = (long)(tp->last_event_jiffies + 1 +
1437                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1438                       (long)jiffies;
1439         if (time_remain < 0)
1440                 return;
1441
1442         /* Check if we can shorten the wait time. */
1443         delay_cnt = jiffies_to_usecs(time_remain);
1444         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1445                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
             /* Poll in 8us steps; +1 guarantees at least one check. */
1446         delay_cnt = (delay_cnt >> 3) + 1;
1447
1448         for (i = 0; i < delay_cnt; i++) {
1449                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1450                         break;
1451                 udelay(8);
1452         }
1453 }
1454
1455 /* tp->lock is held. */
/* Report the current PHY link registers (BMCR/BMSR, local/partner
 * advertisement, 1000BASE-T control/status, PHY address) to the ASF
 * firmware via the shared-memory command mailbox.  Only applicable
 * to 5780-class parts with ASF enabled.
 */
1456 static void tg3_ump_link_report(struct tg3 *tp)
1457 {
1458         u32 reg;
1459         u32 val;
1460
1461         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1462                 return;
1463
1464         tg3_wait_for_event_ack(tp);
1465
1466         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1467
             /* Payload length in bytes: four 32-bit words follow. */
1468         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1469
             /* Word 0: BMCR in the high half, BMSR in the low half. */
1470         val = 0;
1471         if (!tg3_readphy(tp, MII_BMCR, &reg))
1472                 val = reg << 16;
1473         if (!tg3_readphy(tp, MII_BMSR, &reg))
1474                 val |= (reg & 0xffff);
1475         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1476
             /* Word 1: local advertisement / link partner ability. */
1477         val = 0;
1478         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1479                 val = reg << 16;
1480         if (!tg3_readphy(tp, MII_LPA, &reg))
1481                 val |= (reg & 0xffff);
1482         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1483
             /* Word 2: 1000BASE-T control/status (copper only). */
1484         val = 0;
1485         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1486                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1487                         val = reg << 16;
1488                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1489                         val |= (reg & 0xffff);
1490         }
1491         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1492
             /* Word 3: PHY address register. */
1493         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1494                 val = reg << 16;
1495         else
1496                 val = 0;
1497         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1498
1499         tg3_generate_fw_event(tp);
1500 }
1501
1502 /* tp->lock is held. */
1503 static void tg3_stop_fw(struct tg3 *tp)
1504 {
1505         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1506                 /* Wait for RX cpu to ACK the previous event. */
1507                 tg3_wait_for_event_ack(tp);
1508
1509                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1510
1511                 tg3_generate_fw_event(tp);
1512
1513                 /* Wait for RX cpu to ACK this event. */
1514                 tg3_wait_for_event_ack(tp);
1515         }
1516 }
1517
1518 /* tp->lock is held. */
1519 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1520 {
1521         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1522                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1523
1524         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1525                 switch (kind) {
1526                 case RESET_KIND_INIT:
1527                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1528                                       DRV_STATE_START);
1529                         break;
1530
1531                 case RESET_KIND_SHUTDOWN:
1532                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1533                                       DRV_STATE_UNLOAD);
1534                         break;
1535
1536                 case RESET_KIND_SUSPEND:
1537                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1538                                       DRV_STATE_SUSPEND);
1539                         break;
1540
1541                 default:
1542                         break;
1543                 }
1544         }
1545
1546         if (kind == RESET_KIND_INIT ||
1547             kind == RESET_KIND_SUSPEND)
1548                 tg3_ape_driver_state_change(tp, kind);
1549 }
1550
1551 /* tp->lock is held. */
1552 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1553 {
1554         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1555                 switch (kind) {
1556                 case RESET_KIND_INIT:
1557                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1558                                       DRV_STATE_START_DONE);
1559                         break;
1560
1561                 case RESET_KIND_SHUTDOWN:
1562                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1563                                       DRV_STATE_UNLOAD_DONE);
1564                         break;
1565
1566                 default:
1567                         break;
1568                 }
1569         }
1570
1571         if (kind == RESET_KIND_SHUTDOWN)
1572                 tg3_ape_driver_state_change(tp, kind);
1573 }
1574
1575 /* tp->lock is held. */
1576 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1577 {
1578         if (tg3_flag(tp, ENABLE_ASF)) {
1579                 switch (kind) {
1580                 case RESET_KIND_INIT:
1581                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1582                                       DRV_STATE_START);
1583                         break;
1584
1585                 case RESET_KIND_SHUTDOWN:
1586                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1587                                       DRV_STATE_UNLOAD);
1588                         break;
1589
1590                 case RESET_KIND_SUSPEND:
1591                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1592                                       DRV_STATE_SUSPEND);
1593                         break;
1594
1595                 default:
1596                         break;
1597                 }
1598         }
1599 }
1600
/* Wait for the on-chip firmware to finish initializing after reset.
 * 5906 uses the VCPU status register (up to 20ms, hard failure on
 * timeout); other chips poll the firmware mailbox for the inverted
 * magic value but treat timeout as non-fatal because some boards
 * (e.g. Sun onboard parts) legitimately run without firmware.
 */
1601 static int tg3_poll_fw(struct tg3 *tp)
1602 {
1603         int i;
1604         u32 val;
1605
1606         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1607                 /* Wait up to 20ms for init done. */
1608                 for (i = 0; i < 200; i++) {
1609                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1610                                 return 0;
1611                         udelay(100);
1612                 }
1613                 return -ENODEV;
1614         }
1615
1616         /* Wait for firmware initialization to complete. */
1617         for (i = 0; i < 100000; i++) {
1618                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1619                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1620                         break;
1621                 udelay(10);
1622         }
1623
1624         /* Chip might not be fitted with firmware.  Some Sun onboard
1625          * parts are configured like that.  So don't signal the timeout
1626          * of the above loop as an error, but do report the lack of
1627          * running firmware once.
1628          */
1629         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1630                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1631
1632                 netdev_info(tp->dev, "No firmware running\n");
1633         }
1634
1635         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1636                 /* The 57765 A0 needs a little more
1637                  * time to do some important work.
1638                  */
1639                 mdelay(10);
1640         }
1641
1642         return 0;
1643 }
1644
/* Log the current link state (speed/duplex, flow control, EEE) to
 * the kernel log, rate-gated by the netif message level, and forward
 * the link update to ASF firmware via tg3_ump_link_report().
 */
1645 static void tg3_link_report(struct tg3 *tp)
1646 {
1647         if (!netif_carrier_ok(tp->dev)) {
1648                 netif_info(tp, link, tp->dev, "Link is down\n");
1649                 tg3_ump_link_report(tp);
1650         } else if (netif_msg_link(tp)) {
1651                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1652                             (tp->link_config.active_speed == SPEED_1000 ?
1653                              1000 :
1654                              (tp->link_config.active_speed == SPEED_100 ?
1655                               100 : 10)),
1656                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1657                              "full" : "half"));
1658
1659                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1660                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1661                             "on" : "off",
1662                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1663                             "on" : "off");
1664
1665                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1666                         netdev_info(tp->dev, "EEE is %s\n",
1667                                     tp->setlpicnt ? "enabled" : "disabled");
1668
1669                 tg3_ump_link_report(tp);
1670         }
1671 }
1672
1673 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1674 {
1675         u16 miireg;
1676
1677         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1678                 miireg = ADVERTISE_1000XPAUSE;
1679         else if (flow_ctrl & FLOW_CTRL_TX)
1680                 miireg = ADVERTISE_1000XPSE_ASYM;
1681         else if (flow_ctrl & FLOW_CTRL_RX)
1682                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1683         else
1684                 miireg = 0;
1685
1686         return miireg;
1687 }
1688
1689 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1690 {
1691         u8 cap = 0;
1692
1693         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1694                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1695         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1696                 if (lcladv & ADVERTISE_1000XPAUSE)
1697                         cap = FLOW_CTRL_RX;
1698                 if (rmtadv & ADVERTISE_1000XPAUSE)
1699                         cap = FLOW_CTRL_TX;
1700         }
1701
1702         return cap;
1703 }
1704
1705 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1706 {
1707         u8 autoneg;
1708         u8 flowctrl = 0;
1709         u32 old_rx_mode = tp->rx_mode;
1710         u32 old_tx_mode = tp->tx_mode;
1711
1712         if (tg3_flag(tp, USE_PHYLIB))
1713                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1714         else
1715                 autoneg = tp->link_config.autoneg;
1716
1717         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1718                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1719                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1720                 else
1721                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1722         } else
1723                 flowctrl = tp->link_config.flowctrl;
1724
1725         tp->link_config.active_flowctrl = flowctrl;
1726
1727         if (flowctrl & FLOW_CTRL_RX)
1728                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1729         else
1730                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1731
1732         if (old_rx_mode != tp->rx_mode)
1733                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1734
1735         if (flowctrl & FLOW_CTRL_TX)
1736                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1737         else
1738                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1739
1740         if (old_tx_mode != tp->tx_mode)
1741                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1742 }
1743
/* phylib link-change callback.
 *
 * Reconciles the MAC configuration (port mode, duplex, flow control,
 * MI status polling and TX timings) with the link state phylib reports
 * for the attached PHY, and emits a link report when anything
 * user-visible changed.  Runs under tp->lock.
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	/* Start from the current MAC mode with the port-mode and duplex
	 * fields cleared; they are recomputed below.
	 */
	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			/* 5785 at an unknown/other speed stays in MII mode. */
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Full duplex: build the local pause advertisement
			 * from the configured flow control and collect the
			 * partner's pause bits for resolution below.
			 */
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	/* Only touch MAC_MODE when it changed. */
	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	/* 1000 Mbps half duplex uses a longer slot time (0xff); all
	 * other modes use the default of 32.
	 */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Report when the link toggled or any active parameter changed. */
	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
1827
/* Connect the tg3 netdev to its PHY via phylib.
 *
 * Resets the PHY to a known state, attaches tg3_adjust_link() as the
 * link-change callback, and masks the PHY's supported/advertised
 * feature sets down to what the MAC interface mode can handle.
 *
 * Returns 0 on success (or if already connected), the phy_connect()
 * error, or -EINVAL for an unsupported PHY interface mode.
 */
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	/* Already attached; nothing to do. */
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* 10/100-only devices use the MII feature set below. */
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	/* Advertise everything the (now masked) PHY supports. */
	phydev->advertising = phydev->supported;

	return 0;
}
1875
1876 static void tg3_phy_start(struct tg3 *tp)
1877 {
1878         struct phy_device *phydev;
1879
1880         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1881                 return;
1882
1883         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1884
1885         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1886                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1887                 phydev->speed = tp->link_config.orig_speed;
1888                 phydev->duplex = tp->link_config.orig_duplex;
1889                 phydev->autoneg = tp->link_config.orig_autoneg;
1890                 phydev->advertising = tp->link_config.orig_advertising;
1891         }
1892
1893         phy_start(phydev);
1894
1895         phy_start_aneg(phydev);
1896 }
1897
1898 static void tg3_phy_stop(struct tg3 *tp)
1899 {
1900         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1901                 return;
1902
1903         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1904 }
1905
1906 static void tg3_phy_fini(struct tg3 *tp)
1907 {
1908         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1909                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1910                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1911         }
1912 }
1913
1914 static int tg3_phy_set_extloopbk(struct tg3 *tp)
1915 {
1916         int err;
1917         u32 val;
1918
1919         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
1920                 return 0;
1921
1922         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1923                 /* Cannot do read-modify-write on 5401 */
1924                 err = tg3_phy_auxctl_write(tp,
1925                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1926                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
1927                                            0x4c20);
1928                 goto done;
1929         }
1930
1931         err = tg3_phy_auxctl_read(tp,
1932                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1933         if (err)
1934                 return err;
1935
1936         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
1937         err = tg3_phy_auxctl_write(tp,
1938                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
1939
1940 done:
1941         return err;
1942 }
1943
1944 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1945 {
1946         u32 phytest;
1947
1948         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1949                 u32 phy;
1950
1951                 tg3_writephy(tp, MII_TG3_FET_TEST,
1952                              phytest | MII_TG3_FET_SHADOW_EN);
1953                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1954                         if (enable)
1955                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1956                         else
1957                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1958                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1959                 }
1960                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1961         }
1962 }
1963
1964 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1965 {
1966         u32 reg;
1967
1968         if (!tg3_flag(tp, 5705_PLUS) ||
1969             (tg3_flag(tp, 5717_PLUS) &&
1970              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1971                 return;
1972
1973         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1974                 tg3_phy_fet_toggle_apd(tp, enable);
1975                 return;
1976         }
1977
1978         reg = MII_TG3_MISC_SHDW_WREN |
1979               MII_TG3_MISC_SHDW_SCR5_SEL |
1980               MII_TG3_MISC_SHDW_SCR5_LPED |
1981               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1982               MII_TG3_MISC_SHDW_SCR5_SDTL |
1983               MII_TG3_MISC_SHDW_SCR5_C125OE;
1984         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1985                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1986
1987         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1988
1989
1990         reg = MII_TG3_MISC_SHDW_WREN |
1991               MII_TG3_MISC_SHDW_APD_SEL |
1992               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1993         if (enable)
1994                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1995
1996         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1997 }
1998
/* Enable or disable automatic MDI/MDI-X crossover detection in the
 * PHY.  No-op on pre-5705 chips and on serdes PHYs.  FET PHYs are
 * programmed through their shadow register bank; other PHYs through
 * the AUXCTL MISC shadow register.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		/* Expose the shadow bank, RMW the MDIX bit in MISCCTRL,
		 * then restore FET_TEST to hide the bank again.
		 */
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		/* Read-modify-write the force-auto-MDIX bit in the
		 * AUXCTL MISC shadow register.
		 */
		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
2039
2040 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2041 {
2042         int ret;
2043         u32 val;
2044
2045         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2046                 return;
2047
2048         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2049         if (!ret)
2050                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2051                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2052 }
2053
/* Apply the per-device OTP (one-time-programmable) tuning values to
 * the PHY DSP registers.  Each field is extracted from tp->phy_otp and
 * written to its corresponding DSP tap/adjustment register while the
 * SMDSP block is enabled.  No-op when no OTP data was read at probe.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* Bail out if the SMDSP block could not be enabled (non-zero
	 * return means the auxctl write failed).
	 */
	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
		return;

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Re-disable the SMDSP block now that programming is done. */
	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
}
2090
/* Re-evaluate Energy Efficient Ethernet state after a link change.
 *
 * When autoneg produced a full-duplex 100/1000 link, program the LPI
 * exit timer for the active speed and, if the link partner resolved to
 * an EEE-capable mode, arm setlpicnt so LPI gets enabled later.
 * Otherwise clear the DSP TAP26 register and turn LPI off in the CPMU.
 * No-op on PHYs without EEE capability.
 */
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		/* LPI exit latency depends on link speed. */
		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		/* Check the clause-45 EEE resolution status from the
		 * autoneg MMD.
		 */
		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		/* SMDSP_ENABLE returns 0 on success, hence the negation. */
		if (current_link_up == 1 &&
		   !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
2133
/* Enable low-power-idle (LPI) in the CPMU.  On 5717/5719/57765-class
 * chips running at 1000 Mbps, first program the DSP TAP26 workaround
 * bits through the SMDSP block (SMDSP_ENABLE returns 0 on success).
 */
static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     tg3_flag(tp, 57765_CLASS)) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
2152
2153 static int tg3_wait_macro_done(struct tg3 *tp)
2154 {
2155         int limit = 100;
2156
2157         while (limit--) {
2158                 u32 tmp32;
2159
2160                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2161                         if ((tmp32 & 0x1000) == 0)
2162                                 break;
2163                 }
2164         }
2165         if (limit < 0)
2166                 return -EBUSY;
2167
2168         return 0;
2169 }
2170
/* Write a DSP test pattern into each of the four PHY channels and read
 * it back to verify the DSP is functioning.
 *
 * On a macro timeout, *resetp is set so the caller retries after a
 * fresh PHY reset.  On a data miscompare, a fixed DSP recovery
 * sequence (0x000b / 0x4001 / 0x4005) is issued.  Returns 0 when all
 * channels verify, -EBUSY otherwise.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	/* Per-channel pattern: six (low, high) word pairs. */
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select this channel's block and start a write macro. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		/* Commit the write and wait for the macro to finish. */
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the channel and start the read-back macro. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read back the six words as (low, high) pairs and
		 * compare against the pattern, masking to the bits the
		 * hardware actually stores.
		 */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Miscompare: issue the DSP recovery
				 * writes and report failure without
				 * requesting a full PHY reset.
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
2236
2237 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2238 {
2239         int chan;
2240
2241         for (chan = 0; chan < 4; chan++) {
2242                 int i;
2243
2244                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2245                              (chan * 0x2000) | 0x0200);
2246                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2247                 for (i = 0; i < 6; i++)
2248                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2249                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2250                 if (tg3_wait_macro_done(tp))
2251                         return -EBUSY;
2252         }
2253
2254         return 0;
2255 }
2256
/* PHY reset workaround for 5703/5704/5705 chips.
 *
 * Repeatedly (up to 10 tries) resets the PHY, forces 1000FD master
 * mode, blocks PHY control access, and runs the DSP test-pattern
 * check until it passes.  Afterwards the channel patterns are cleared
 * and the transmitter/interrupt and master-mode settings are
 * restored.  Returns 0 on success or a negative errno.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_CTRL1000,
			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);

		err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
		if (err)
			return err;

		/* Block the PHY control access.  */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		/* On pattern-write timeout the helper sets do_phy_reset
		 * so the next iteration starts with a fresh reset.
		 */
		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	/* Unblock PHY control access and park the DSP address. */
	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);

	/* Restore the original master/slave configuration. */
	tg3_writephy(tp, MII_CTRL1000, phy9_orig);

	/* Re-enable transmitter and interrupt; if the read-back fails
	 * and no earlier error was recorded, report -EBUSY.
	 */
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
2323
/* Reset the PHY and reapply all chip-specific workarounds and fixups.
 *
 * NOTE(review): the historical comment here claimed the reset was
 * conditional on a FORCE argument, but this function takes none and
 * always resets the PHY.
 *
 * Returns 0 on success or a negative errno from a PHY access failure.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	/* 5906: take the ethernet PHY out of IDDQ (low-power) mode
	 * before touching it.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* Double read of BMSR: the first read returns latched status. */
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	/* Declare link down across the reset. */
	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	/* 5703/5704/5705 need the special DSP-verified reset sequence. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	/* 5784 (post-AX): temporarily clear GPHY 10MB RX-only mode
	 * around the reset, restoring it afterwards.
	 */
	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	/* 5784-AX / 5761-AX: clear the 12.5MHz 1000MB MAC clock mode. */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	/* The remaining fixups do not apply to 5717+ MII serdes PHYs. */
	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	/* Per-erratum DSP workarounds, gated on the PHY bug flags set
	 * at probe time.  SMDSP_ENABLE returns 0 on success.
	 */
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
2464
/* Inter-function GPIO/power-source handshake messages.  The status
 * word (kept in the APE GPIO_MSG register or CPMU_DRV_STATUS, see
 * tg3_set_function_status() below) holds one 4-bit nibble per PCI
 * function, hence the << 0/4/8/12 replication in the ALL_* masks.
 */
#define TG3_GPIO_MSG_DRVR_PRES           0x00000001
#define TG3_GPIO_MSG_NEED_VAUX           0x00000002
#define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
                                          TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
	 (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
	 (TG3_GPIO_MSG_NEED_VAUX << 12))
2480
2481 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2482 {
2483         u32 status, shift;
2484
2485         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2486             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2487                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2488         else
2489                 status = tr32(TG3_CPMU_DRV_STATUS);
2490
2491         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2492         status &= ~(TG3_GPIO_MSG_MASK << shift);
2493         status |= (newstat << shift);
2494
2495         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2496             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2497                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2498         else
2499                 tw32(TG3_CPMU_DRV_STATUS, status);
2500
2501         return status >> TG3_APE_GPIO_MSG_SHIFT;
2502 }
2503
2504 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2505 {
2506         if (!tg3_flag(tp, IS_NIC))
2507                 return 0;
2508
2509         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2510             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2511             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2512                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2513                         return -EIO;
2514
2515                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2516
2517                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2518                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2519
2520                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2521         } else {
2522                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2523                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2524         }
2525
2526         return 0;
2527 }
2528
2529 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2530 {
2531         u32 grc_local_ctrl;
2532
2533         if (!tg3_flag(tp, IS_NIC) ||
2534             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2535             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2536                 return;
2537
2538         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2539
2540         tw32_wait_f(GRC_LOCAL_CTRL,
2541                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2542                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2543
2544         tw32_wait_f(GRC_LOCAL_CTRL,
2545                     grc_local_ctrl,
2546                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2547
2548         tw32_wait_f(GRC_LOCAL_CTRL,
2549                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2550                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2551 }
2552
/* Switch the board power source to VAUX using the GPIO sequence
 * appropriate for the chip generation.  The individual register writes
 * and their order are hardware-mandated; do not reorder.
 */
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* 5700/5701: one write asserts all relevant GPIOs at once. */
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		/* Raise GPIO2 (swapped with GPIO0 on this device). */
		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		/* Drop GPIO0 (swapped with GPIO2 on this device). */
		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		/* Step 1: enable outputs, raise GPIO1 (and GPIO2 unless
		 * the board disallows it).
		 */
		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		/* Step 2: raise GPIO0. */
		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		/* Step 3: drop GPIO2 again if it was raised. */
		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
2629
2630 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2631 {
2632         u32 msg = 0;
2633
2634         /* Serialize power state transitions */
2635         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2636                 return;
2637
2638         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2639                 msg = TG3_GPIO_MSG_NEED_VAUX;
2640
2641         msg = tg3_set_function_status(tp, msg);
2642
2643         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2644                 goto done;
2645
2646         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2647                 tg3_pwrsrc_switch_to_vaux(tp);
2648         else
2649                 tg3_pwrsrc_die_with_vmain(tp);
2650
2651 done:
2652         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2653 }
2654
2655 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2656 {
2657         bool need_vaux = false;
2658
2659         /* The GPIOs do something completely different on 57765. */
2660         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2661                 return;
2662
2663         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2664             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2665             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2666                 tg3_frob_aux_power_5717(tp, include_wol ?
2667                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2668                 return;
2669         }
2670
2671         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2672                 struct net_device *dev_peer;
2673
2674                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2675
2676                 /* remove_one() may have been run on the peer. */
2677                 if (dev_peer) {
2678                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2679
2680                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2681                                 return;
2682
2683                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2684                             tg3_flag(tp_peer, ENABLE_ASF))
2685                                 need_vaux = true;
2686                 }
2687         }
2688
2689         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2690             tg3_flag(tp, ENABLE_ASF))
2691                 need_vaux = true;
2692
2693         if (need_vaux)
2694                 tg3_pwrsrc_switch_to_vaux(tp);
2695         else
2696                 tg3_pwrsrc_die_with_vmain(tp);
2697 }
2698
2699 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2700 {
2701         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2702                 return 1;
2703         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2704                 if (speed != SPEED_10)
2705                         return 1;
2706         } else if (speed == SPEED_10)
2707                 return 1;
2708
2709         return 0;
2710 }
2711
/* Put the PHY into its lowest safe power state ahead of device
 * power-down.  @do_low_power: additionally program the aux-control
 * low-power bits on copper PHYs that take that path.  Some chips must
 * keep the PHY powered because of hardware bugs (see below).
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			/* Hold the SerDes digital block in soft reset with
			 * HW autoneg selected before touching serdes cfg.
			 */
			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			/* NOTE(review): bit 15 of MAC_SERDES_CFG has no
			 * named constant here — confirm meaning against
			 * the register definition in tg3.h.
			 */
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906: reset the PHY, then put the embedded PHY in IDDQ. */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			/* Clear the advertisement and restart autoneg so
			 * the link drops before powering down.
			 */
			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			/* Open the FET shadow register window, set the
			 * standby power-down bit, then close the window.
			 */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		/* Force the link LED off and select the aux-control
		 * low-power settings.
		 */
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		/* Drop the 1000Mb MAC clock to 12.5MHz before powering
		 * the PHY down on these chip revisions.
		 */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	/* Finally, power the PHY down via the standard BMCR bit. */
	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
2784
2785 /* tp->lock is held. */
2786 static int tg3_nvram_lock(struct tg3 *tp)
2787 {
2788         if (tg3_flag(tp, NVRAM)) {
2789                 int i;
2790
2791                 if (tp->nvram_lock_cnt == 0) {
2792                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2793                         for (i = 0; i < 8000; i++) {
2794                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2795                                         break;
2796                                 udelay(20);
2797                         }
2798                         if (i == 8000) {
2799                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2800                                 return -ENODEV;
2801                         }
2802                 }
2803                 tp->nvram_lock_cnt++;
2804         }
2805         return 0;
2806 }
2807
2808 /* tp->lock is held. */
2809 static void tg3_nvram_unlock(struct tg3 *tp)
2810 {
2811         if (tg3_flag(tp, NVRAM)) {
2812                 if (tp->nvram_lock_cnt > 0)
2813                         tp->nvram_lock_cnt--;
2814                 if (tp->nvram_lock_cnt == 0)
2815                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2816         }
2817 }
2818
2819 /* tp->lock is held. */
2820 static void tg3_enable_nvram_access(struct tg3 *tp)
2821 {
2822         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2823                 u32 nvaccess = tr32(NVRAM_ACCESS);
2824
2825                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2826         }
2827 }
2828
2829 /* tp->lock is held. */
2830 static void tg3_disable_nvram_access(struct tg3 *tp)
2831 {
2832         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2833                 u32 nvaccess = tr32(NVRAM_ACCESS);
2834
2835                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2836         }
2837 }
2838
/* Read one 32-bit word at @offset from the legacy SEEPROM through the
 * GRC EEPROM registers.  @offset must be dword aligned and within the
 * address mask.  Returns 0 on success, -EINVAL for a bad offset, or
 * -EBUSY if the device never signals completion.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	/* Preserve unrelated bits; clear the address/devid/read fields
	 * before composing the new command.
	 */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll up to ~1s (1000 * 1ms) for the read to complete. */
	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
2878
2879 #define NVRAM_CMD_TIMEOUT 10000
2880
2881 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2882 {
2883         int i;
2884
2885         tw32(NVRAM_CMD, nvram_cmd);
2886         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2887                 udelay(10);
2888                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2889                         udelay(10);
2890                         break;
2891                 }
2892         }
2893
2894         if (i == NVRAM_CMD_TIMEOUT)
2895                 return -EBUSY;
2896
2897         return 0;
2898 }
2899
2900 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2901 {
2902         if (tg3_flag(tp, NVRAM) &&
2903             tg3_flag(tp, NVRAM_BUFFERED) &&
2904             tg3_flag(tp, FLASH) &&
2905             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2906             (tp->nvram_jedecnum == JEDEC_ATMEL))
2907
2908                 addr = ((addr / tp->nvram_pagesize) <<
2909                         ATMEL_AT45DB0X1B_PAGE_POS) +
2910                        (addr % tp->nvram_pagesize);
2911
2912         return addr;
2913 }
2914
2915 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2916 {
2917         if (tg3_flag(tp, NVRAM) &&
2918             tg3_flag(tp, NVRAM_BUFFERED) &&
2919             tg3_flag(tp, FLASH) &&
2920             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2921             (tp->nvram_jedecnum == JEDEC_ATMEL))
2922
2923                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2924                         tp->nvram_pagesize) +
2925                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2926
2927         return addr;
2928 }
2929
2930 /* NOTE: Data read in from NVRAM is byteswapped according to
2931  * the byteswapping settings for all other register accesses.
2932  * tg3 devices are BE devices, so on a BE machine, the data
2933  * returned will be exactly as it is seen in NVRAM.  On a LE
2934  * machine, the 32-bit value will be byteswapped.
2935  */
/* Read one 32-bit word at logical @offset from NVRAM into *@val.
 * Falls back to the SEEPROM path on chips without the NVRAM interface.
 * Returns 0 on success or a negative errno.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	/* Translate to the flash part's physical address layout. */
	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	/* Arbitrate with bootcode/firmware for NVRAM access. */
	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
2967
2968 /* Ensures NVRAM data is in bytestream format. */
2969 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2970 {
2971         u32 v;
2972         int res = tg3_nvram_read(tp, offset, &v);
2973         if (!res)
2974                 *val = cpu_to_be32(v);
2975         return res;
2976 }
2977
/* Write @len bytes from @buf to the legacy SEEPROM at @offset, one
 * 32-bit word at a time through the GRC EEPROM registers.  offset and
 * len are dword aligned.  Returns 0 on success, -EBUSY if a word
 * write never completes.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		/* Write COMPLETE back first (write-to-clear assumed —
		 * TODO confirm against register spec), then compose the
		 * write command from the cleared fields.
		 */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll up to ~1s for this word to finish. */
		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
3026
/* offset and length are dword aligned */
/* Write to unbuffered (page-erase) flash: for each page touched, read
 * the whole page into a scratch buffer, merge in the new data, erase
 * the page, then program the merged page back word by word.
 * NOTE(review): @buf is never advanced across loop iterations, so a
 * write spanning multiple pages would reuse the first bytes of @buf —
 * verify against callers (writes may always fit one page).
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	/* Scratch buffer holding one full flash page. */
	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		/* Start of the page containing @offset. */
		phy_addr = offset & ~pagemask;

		/* Read back the entire page we are about to modify. */
		for (j = 0; j < pagesize; j += 4) {
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
						  (__be32 *) (tmp + j));
			if (ret)
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		/* Merge the caller's data into the page image. */
		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Program the merged page back, one word at a time. */
		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));

			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			/* Mark the first and last words of the burst. */
			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
			if (ret)
				break;
		}
		if (ret)
			break;
	}

	/* Leave the part write-disabled regardless of outcome. */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
3125
/* offset and length are dword aligned */
/* Write to buffered flash or plain EEPROM one dword at a time; the
 * part buffers a page internally, so no explicit erase is issued.
 * Returns 0 on success or the error from tg3_nvram_exec_cmd().
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		/* Translate to the part's physical address layout. */
		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* Flag page boundaries and the ends of the transfer. */
		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* ST parts on pre-5752/pre-5755 chips need an explicit
		 * write-enable before each FIRST command.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			u32 cmd;

			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
			ret = tg3_nvram_exec_cmd(tp, cmd);
			if (ret)
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
	}
	return ret;
}
3177
3178 /* offset and length are dword aligned */
3179 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3180 {
3181         int ret;
3182
3183         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3184                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3185                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3186                 udelay(40);
3187         }
3188
3189         if (!tg3_flag(tp, NVRAM)) {
3190                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3191         } else {
3192                 u32 grc_mode;
3193
3194                 ret = tg3_nvram_lock(tp);
3195                 if (ret)
3196                         return ret;
3197
3198                 tg3_enable_nvram_access(tp);
3199                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3200                         tw32(NVRAM_WRITE1, 0x406);
3201
3202                 grc_mode = tr32(GRC_MODE);
3203                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3204
3205                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3206                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3207                                 buf);
3208                 } else {
3209                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3210                                 buf);
3211                 }
3212
3213                 grc_mode = tr32(GRC_MODE);
3214                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3215
3216                 tg3_disable_nvram_access(tp);
3217                 tg3_nvram_unlock(tp);
3218         }
3219
3220         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3221                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3222                 udelay(40);
3223         }
3224
3225         return ret;
3226 }
3227
3228 #define RX_CPU_SCRATCH_BASE     0x30000
3229 #define RX_CPU_SCRATCH_SIZE     0x04000
3230 #define TX_CPU_SCRATCH_BASE     0x34000
3231 #define TX_CPU_SCRATCH_SIZE     0x04000
3232
/* tp->lock is held. */
/* Halt the embedded RX or TX CPU selected by @offset (RX_CPU_BASE or
 * TX_CPU_BASE).  Returns 0 on success, -ENODEV if the CPU does not
 * report halted within the polling budget.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
	int i;

	/* 5705+ chips have no separate TX CPU to halt. */
	BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906 halts its VCPU via an extended-control bit. */
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (offset == RX_CPU_BASE) {
		/* Repeatedly request a halt until the CPU reports it. */
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}

		/* Issue one final (flushed) halt and let it settle. */
		tw32(offset + CPU_STATE, 0xffffffff);
		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
		udelay(10);
	} else {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
	}

	if (i >= 10000) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, offset == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
3277
/* Describes a firmware image to be copied into CPU scratch memory. */
struct fw_info {
	unsigned int fw_base;		/* start address from the blob header */
	unsigned int fw_len;		/* total image length, in bytes */
	const __be32 *fw_data;		/* big-endian firmware words */
};
3283
/* tp->lock is held. */
/* Copy the firmware described by @info into the scratch memory at
 * @cpu_scratch_base for the CPU at @cpu_base, leaving that CPU halted.
 * Returns 0 on success, -EINVAL for an illegal target CPU, or the
 * error from tg3_halt_cpu().
 */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 struct fw_info *info)
{
	int err, lock_err, i;
	void (*write_op)(struct tg3 *, u32, u32);

	/* 5705+ chips have no TX CPU to load. */
	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	/* Pick the write primitive: 5705+ uses memory-window writes,
	 * older chips use indirect register writes.
	 */
	if (tg3_flag(tp, 5705_PLUS))
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	/* It is possible that bootcode is still loading at this point.
	 * Get the nvram lock first before halting the cpu.
	 */
	lock_err = tg3_nvram_lock(tp);
	err = tg3_halt_cpu(tp, cpu_base);
	if (!lock_err)
		tg3_nvram_unlock(tp);
	if (err)
		goto out;

	/* Zero the whole scratch area, then keep the CPU halted while
	 * the image is copied in.
	 */
	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
		write_op(tp, cpu_scratch_base + i, 0);
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
	/* Low 16 bits of fw_base give the offset within scratch memory. */
	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->fw_base & 0xffff) +
			      (i * sizeof(u32))),
			      be32_to_cpu(info->fw_data[i]));

	err = 0;

out:
	return err;
}
3329
/* tp->lock is held. */
/* Load the 5701 A0 workaround firmware into both on-chip CPUs, then
 * start only the RX CPU at the image entry point.  Returns 0 on
 * success or a negative errno.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	int err, i;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];

	/* The same image is loaded into the RX and the TX CPU. */
	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);

	/* Poll until the program counter reads back as the entry point,
	 * re-halting and rewriting the PC on each retry.
	 */
	for (i = 0; i < 5; i++) {
		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
			break;
		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	/* Clearing CPU_MODE (drops CPU_MODE_HALT) lets the RX CPU run. */
	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);

	return 0;
}
3384
/* tp->lock is held. */
/* Load the software-TSO firmware, if this chip needs it, and start the
 * CPU that will run it.  Returns 0 on success (including the no-op case
 * of hardware-TSO chips) or a negative errno.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	/* Chips with any hardware TSO engine need no firmware assist. */
	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		return 0;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	cpu_scratch_size = tp->fw_len;
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];

	/* On the 5705 the image goes into the RX CPU's mbuf pool area;
	 * all other chips use the TX CPU scratch memory.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, info.fw_base);

	/* Retry until the program counter reads back as the entry point. */
	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.fw_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	/* Clearing CPU_MODE (drops CPU_MODE_HALT) starts the CPU. */
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
	return 0;
}
3448
3449
3450 /* tp->lock is held. */
3451 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3452 {
3453         u32 addr_high, addr_low;
3454         int i;
3455
3456         addr_high = ((tp->dev->dev_addr[0] << 8) |
3457                      tp->dev->dev_addr[1]);
3458         addr_low = ((tp->dev->dev_addr[2] << 24) |
3459                     (tp->dev->dev_addr[3] << 16) |
3460                     (tp->dev->dev_addr[4] <<  8) |
3461                     (tp->dev->dev_addr[5] <<  0));
3462         for (i = 0; i < 4; i++) {
3463                 if (i == 1 && skip_mac_1)
3464                         continue;
3465                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3466                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3467         }
3468
3469         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3470             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3471                 for (i = 0; i < 12; i++) {
3472                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3473                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3474                 }
3475         }
3476
3477         addr_high = (tp->dev->dev_addr[0] +
3478                      tp->dev->dev_addr[1] +
3479                      tp->dev->dev_addr[2] +
3480                      tp->dev->dev_addr[3] +
3481                      tp->dev->dev_addr[4] +
3482                      tp->dev->dev_addr[5]) &
3483                 TX_BACKOFF_SEED_MASK;
3484         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3485 }
3486
/* Rewrite the cached TG3PCI_MISC_HOST_CTRL value into PCI config space.
 * Called before touching chip registers on paths where config state may
 * have been lost (e.g. around power-state transitions).
 */
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
3496
3497 static int tg3_power_up(struct tg3 *tp)
3498 {
3499         int err;
3500
3501         tg3_enable_register_access(tp);
3502
3503         err = pci_set_power_state(tp->pdev, PCI_D0);
3504         if (!err) {
3505                 /* Switch out of Vaux if it is a NIC */
3506                 tg3_pwrsrc_switch_to_vmain(tp);
3507         } else {
3508                 netdev_err(tp->dev, "Transition to D0 failed\n");
3509         }
3510
3511         return err;
3512 }
3513
3514 static int tg3_setup_phy(struct tg3 *, int);
3515
/* Quiesce the chip and configure it for a low-power state: save link
 * settings, restrict the advertisement for Wake-on-LAN, set up the MAC
 * for magic-packet reception if the device should wake the system, and
 * gate clocks where the chip allows it.  Always returns 0.
 */
static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		u16 lnkctl;

		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				     &lnkctl);
		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
		pci_write_config_word(tp->pdev,
				      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				      lnkctl);
	}

	/* Mask PCI interrupts while the device is being shut down. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     tg3_flag(tp, WOL_ENABLE);

	if (tg3_flag(tp, USE_PHYLIB)) {
		/* phylib-managed PHY: save current link parameters in
		 * link_config.orig_* and re-advertise a restricted set
		 * (10Mb half, plus more if ASF or WoL needs it).
		 */
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			tp->link_config.orig_speed = phydev->speed;
			tp->link_config.orig_duplex = phydev->duplex;
			tp->link_config.orig_autoneg = phydev->autoneg;
			tp->link_config.orig_advertising = phydev->advertising;

			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
				if (tg3_flag(tp, WOL_SPEED_100MB))
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			/* Only certain Broadcom PHY families take the
			 * full low-power path below.
			 */
			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		/* Driver-managed PHY: save link_config and, for copper,
		 * drop to 10Mb half duplex autoneg.
		 */
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
			tp->link_config.orig_speed = tp->link_config.speed;
			tp->link_config.orig_duplex = tp->link_config.duplex;
			tp->link_config.orig_autoneg = tp->link_config.autoneg;
		}

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			tp->link_config.speed = SPEED_10;
			tp->link_config.duplex = DUPLEX_HALF;
			tp->link_config.autoneg = AUTONEG_ENABLE;
			tg3_setup_phy(tp, 0);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		int i;
		u32 val;

		/* Wait (up to ~200ms) for the firmware mailbox to signal
		 * completion before proceeding.
		 */
		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tg3_flag(tp, WOL_CAP))
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		/* Keep the MAC running so magic packets can be received. */
		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Gate clocks where the chip family permits it; the exact bit
	 * recipe is per-ASIC.
	 */
	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Apply in two steps, settling 40us after each write. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp, true);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			/* Take the nvram lock around the CPU halt, same
			 * as tg3_load_firmware_cpu() does.
			 */
			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
3761
/* Fully power the device down: run the low-power preparation sequence,
 * arm PME wake according to the WoL flag, then enter PCI D3hot.
 */
static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
3769
3770 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3771 {
3772         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3773         case MII_TG3_AUX_STAT_10HALF:
3774                 *speed = SPEED_10;
3775                 *duplex = DUPLEX_HALF;
3776                 break;
3777
3778         case MII_TG3_AUX_STAT_10FULL:
3779                 *speed = SPEED_10;
3780                 *duplex = DUPLEX_FULL;
3781                 break;
3782
3783         case MII_TG3_AUX_STAT_100HALF:
3784                 *speed = SPEED_100;
3785                 *duplex = DUPLEX_HALF;
3786                 break;
3787
3788         case MII_TG3_AUX_STAT_100FULL:
3789                 *speed = SPEED_100;
3790                 *duplex = DUPLEX_FULL;
3791                 break;
3792
3793         case MII_TG3_AUX_STAT_1000HALF:
3794                 *speed = SPEED_1000;
3795                 *duplex = DUPLEX_HALF;
3796                 break;
3797
3798         case MII_TG3_AUX_STAT_1000FULL:
3799                 *speed = SPEED_1000;
3800                 *duplex = DUPLEX_FULL;
3801                 break;
3802
3803         default:
3804                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3805                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3806                                  SPEED_10;
3807                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3808                                   DUPLEX_HALF;
3809                         break;
3810                 }
3811                 *speed = SPEED_INVALID;
3812                 *duplex = DUPLEX_INVALID;
3813                 break;
3814         }
3815 }
3816
/* Program the PHY autoneg advertisement registers.
 * @advertise: ethtool ADVERTISED_* bits to advertise.
 * @flowctrl:  FLOW_CTRL_TX/FLOW_CTRL_RX bits folded into the pause
 *             advertisement.
 * Also refreshes the EEE advertisement on EEE-capable PHYs.  Returns 0
 * or a PHY access error.
 */
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	/* 10/100 ability bits plus pause bits into MII_ADVERTISE. */
	new_adv = ADVERTISE_CSMA;
	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
	new_adv |= mii_advertise_flowctrl(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);

		/* 5701 A0/B0 get the master bits forced on (presumably a
		 * chip erratum workaround; mirrored by the check in
		 * tg3_phy_copper_an_config_ok()).
		 */
		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
		if (err)
			goto done;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	/* Disable LPI while the EEE advertisement is reprogrammed. */
	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
	if (!err) {
		u32 err2;

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;
		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
		if (err)
			val = 0;

		switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
		case ASIC_REV_57766:
		case ASIC_REV_5719:
			/* If we advertised any eee advertisements above... */
			if (val)
				val = MII_TG3_DSP_TAP26_ALNOKO |
				      MII_TG3_DSP_TAP26_RMRXSTO |
				      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
			/* Fall through */
		case ASIC_REV_5720:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
		}

		/* Always balance SMDSP_ENABLE with a disable; report the
		 * disable error only if nothing failed earlier.
		 */
		err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		if (!err)
			err = err2;
	}

done:
	return err;
}
3889
/* Begin link bring-up on a copper PHY according to tp->link_config:
 * either program an autoneg advertisement (restricted in low-power/WoL
 * mode) and restart autonegotiation, or force a specific speed/duplex
 * through BMCR.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		/* Low-power mode: advertise only 10Mb modes, plus 100Mb
		 * when WoL at 100Mb is configured.
		 */
		new_adv = ADVERTISED_10baseT_Half |
			  ADVERTISED_10baseT_Full;
		if (tg3_flag(tp, WOL_SPEED_100MB))
			new_adv |= ADVERTISED_100baseT_Half |
				   ADVERTISED_100baseT_Full;

		tg3_phy_autoneg_cfg(tp, new_adv,
				    FLOW_CTRL_TX | FLOW_CTRL_RX);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* No forced speed: advertise the configured set, minus
		 * gigabit on 10/100-only PHYs.
		 */
		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
				    tp->link_config.flowctrl);
	} else {
		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_1000baseT_Full;
			else
				new_adv = ADVERTISED_1000baseT_Half;
		} else if (tp->link_config.speed == SPEED_100) {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_100baseT_Full;
			else
				new_adv = ADVERTISED_100baseT_Half;
		} else {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_10baseT_Full;
			else
				new_adv = ADVERTISED_10baseT_Half;
		}

		tg3_phy_autoneg_cfg(tp, new_adv,
				    tp->link_config.flowctrl);
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		/* Translate the forced mode into BMCR speed/duplex bits. */
		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Force the link down via loopback first, then
			 * poll (up to 1500 x 10us) for link-down before
			 * writing the new BMCR value.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* BMSR is read twice; latched bits need a
				 * second read to reflect current state.
				 */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
3983
/* Apply the Broadcom 5401 PHY DSP setup writes.  The register/value
 * pairs are vendor-specified magic numbers.  Returns 0 if every write
 * succeeded, otherwise the OR of the failing return codes.
 */
static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);

	/* Errors are OR-ed so a single failure makes err nonzero. */
	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);

	udelay(40);

	return err;
}
4002
/* Verify that the PHY's advertisement registers (MII_ADVERTISE and,
 * for gigabit PHYs, MII_CTRL1000) still match what tp->link_config
 * requests.
 * @lcladv: out - the current MII_ADVERTISE register value.
 * Returns false on any mismatch or PHY read failure, true otherwise.
 */
static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
{
	u32 advmsk, tgtadv, advertising;

	advertising = tp->link_config.advertising;
	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;

	advmsk = ADVERTISE_ALL;
	/* Pause bits are only expected, and only compared, when the link
	 * is running full duplex.
	 */
	if (tp->link_config.active_duplex == DUPLEX_FULL) {
		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	}

	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
		return false;

	if ((*lcladv & advmsk) != tgtadv)
		return false;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		u32 tg3_ctrl;

		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);

		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
			return false;

		/* 5701 A0/B0 advertise with the master bits forced on
		 * (see tg3_phy_autoneg_cfg()), so include those bits in
		 * the expected value and the comparison mask.
		 */
		if (tgtadv &&
		    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
		} else {
			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
		}

		if (tg3_ctrl != tgtadv)
			return false;
	}

	return true;
}
4046
4047 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4048 {
4049         u32 lpeth = 0;
4050
4051         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4052                 u32 val;
4053
4054                 if (tg3_readphy(tp, MII_STAT1000, &val))
4055                         return false;
4056
4057                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4058         }
4059
4060         if (tg3_readphy(tp, MII_LPA, rmtadv))
4061                 return false;
4062
4063         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4064         tp->link_config.rmt_adv = lpeth;
4065
4066         return true;
4067 }
4068
/* Bring up / re-evaluate the link on a copper (10/100/1000BASE-T) PHY.
 *
 * @tp:          device state
 * @force_reset: non-zero to unconditionally reset the PHY first
 *
 * Applies per-chip PHY workarounds, polls the PHY for link, validates
 * the negotiated (or forced) speed/duplex against tp->link_config, then
 * programs the MAC port mode, duplex, polarity and flow control to
 * match, and reports carrier transitions via netif_carrier_{on,off}().
 *
 * Returns 0 on success, or a negative error from the 5401 PHY DSP init
 * helpers.  NOTE(review): BMSR is read twice throughout because link
 * status is latched-low; the first read clears the latch.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, val;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	/* Ack any latched link/config change status bits. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	/* Stop MI auto-polling so the manual MDIO accesses below do not
	 * collide with the hardware poller.
	 */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !tg3_flag(tp, INIT_COMPLETE))
			bmsr = 0;

		/* No link: reprogram the 5401 DSP and wait up to ~10ms
		 * for link to come back before deciding on a full reset.
		 */
		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 rev B0 at gigabit may need a full PHY
			 * reset plus DSP reinit to recover link.
			 */
			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
			    TG3_PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &val);
	tg3_readphy(tp, MII_TG3_ISTAT, &val);

	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;
	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
	tp->link_config.rmt_adv = 0;

	/* If the capacitive-coupling workaround bit (bit 10 of the
	 * MISCTEST shadow register) is not yet set, set it and restart
	 * link bring-up.
	 */
	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					  &val);
		if (!err && !(val & (1 << 10))) {
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					     val | (1 << 10));
			goto relink;
		}
	}

	/* Poll for link, up to 100 * 40us. */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait for the AUX status register to report a valid
		 * (non-zero) speed/duplex indication.
		 */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Retry until BMCR reads back something sane (0x7fff is
		 * what a failed/floating MDIO read can return).
		 */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			/* Link is only good if autoneg is on and the
			 * advertisement registers match our config.
			 */
			if ((bmcr & BMCR_ANENABLE) &&
			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
				current_link_up = 1;
		} else {
			/* Forced mode: require an exact match with the
			 * requested speed/duplex/flow-control.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex &&
			    tp->link_config.flowctrl ==
			    tp->link_config.active_flowctrl) {
				current_link_up = 1;
			}
		}

		if (current_link_up == 1 &&
		    tp->link_config.active_duplex == DUPLEX_FULL) {
			u32 reg, bit;

			/* Record crossover (MDI-X) status; FET-style
			 * PHYs report it in a different register.
			 */
			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
				reg = MII_TG3_FET_GEN_STAT;
				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
			} else {
				reg = MII_TG3_EXT_STAT;
				bit = MII_TG3_EXT_STAT_MDIX;
			}

			if (!tg3_readphy(tp, reg, &val) && (val & bit))
				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;

			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
		}
	}

relink:
	/* No link (or waking from low power): restart PHY bring-up and
	 * see whether the link comes back immediately.
	 */
	if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			current_link_up = 1;
	}

	/* Program the MAC port mode to match the link speed. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_phy_eee_adjust(tp, current_link_up);

	if (tg3_flag(tp, USE_LINKCHG_REG)) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* 5700 gigabit on a fast PCI(-X) bus: notify firmware via the
	 * mailbox after clearing status.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		u16 oldlnkctl, newlnkctl;

		/* Disable PCIe CLKREQ on slow links, enable otherwise. */
		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				     &oldlnkctl);
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
		else
			newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
		if (newlnkctl != oldlnkctl)
			pci_write_config_word(tp->pdev,
					      pci_pcie_cap(tp->pdev) +
					      PCI_EXP_LNKCTL, newlnkctl);
	}

	/* Report carrier transitions to the network stack. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
4360
/* State for the software 1000BASE-X autonegotiation state machine
 * (tg3_fiber_aneg_smachine), modeled on IEEE 802.3 Clause 37.
 */
struct tg3_fiber_aneginfo {
	/* Current state machine state (one of ANEG_STATE_*). */
	int state;
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

	/* MR_* management/status flag bits; the MR_LP_ADV_* bits record
	 * what the link partner advertised in its config word.
	 */
	u32 flags;
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	/* Tick timestamps (in state-machine iterations, not jiffies). */
	unsigned long link_time, cur_time;

	/* Last rx config word seen, and how many consecutive times a
	 * matching word has been observed (for ability_match).
	 */
	u32 ability_match_cfg;
	int ability_match_count;

	/* Boolean match indicators derived from the incoming config word. */
	char ability_match, idle_match, ack_match;

	/* Raw tx/rx autoneg config words (ANEG_CFG_* bit layout). */
	u32 txconfig, rxconfig;
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes for tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* Ticks a state must remain stable before advancing. */
#define ANEG_STATE_SETTLE_TIME  10000
4424
/* Run one tick of the software 1000BASE-X autonegotiation state
 * machine for fiber parts without hardware autoneg support.
 *
 * @tp: device state
 * @ap: persistent state machine context (caller-owned across ticks)
 *
 * Samples the received config word from MAC_RX_AUTO_NEG, updates the
 * match detectors, then advances ap->state one step, programming
 * MAC_TX_AUTO_NEG / MAC_MODE as required.
 *
 * Returns ANEG_OK (keep ticking), ANEG_TIMER_ENAB (keep ticking, a
 * settle timer is running), ANEG_DONE, or ANEG_FAILED.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	/* First-ever tick: start from a clean context. */
	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		/* ability_match: the same non-idle config word must be
		 * seen more than once in a row.
		 */
		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* No config word received: link partner is sending idles. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Transmit all-zero config words to restart negotiation. */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Advertise full duplex plus our pause capabilities. */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Echo the partner's word back with the ACK bit set. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		/* Reserved bits set in the partner's word is an error. */
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Latch the link partner's advertised abilities into
		 * the MR_LP_ADV_* flag bits.
		 */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				/* Next-page exchange is not implemented;
				 * only proceed if neither side needs it.
				 */
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop sending config words; start sending idles. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
4676
4677 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4678 {
4679         int res = 0;
4680         struct tg3_fiber_aneginfo aninfo;
4681         int status = ANEG_FAILED;
4682         unsigned int tick;
4683         u32 tmp;
4684
4685         tw32_f(MAC_TX_AUTO_NEG, 0);
4686
4687         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4688         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4689         udelay(40);
4690
4691         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4692         udelay(40);
4693
4694         memset(&aninfo, 0, sizeof(aninfo));
4695         aninfo.flags |= MR_AN_ENABLE;
4696         aninfo.state = ANEG_STATE_UNKNOWN;
4697         aninfo.cur_time = 0;
4698         tick = 0;
4699         while (++tick < 195000) {
4700                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4701                 if (status == ANEG_DONE || status == ANEG_FAILED)
4702                         break;
4703
4704                 udelay(1);
4705         }
4706
4707         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4708         tw32_f(MAC_MODE, tp->mac_mode);
4709         udelay(40);
4710
4711         *txflags = aninfo.txconfig;
4712         *rxflags = aninfo.flags;
4713
4714         if (status == ANEG_DONE &&
4715             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4716                              MR_LP_ADV_FULL_DUPLEX)))
4717                 res = 1;
4718
4719         return res;
4720 }
4721
/* Initialize the BCM8002 fiber transceiver via its vendor-specific
 * register sequence (register numbers/values per Broadcom; exact write
 * order and delays matter).  Skipped if the device is already
 * initialized and the PCS has sync.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
4771
4772 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4773 {
4774         u16 flowctrl;
4775         u32 sg_dig_ctrl, sg_dig_status;
4776         u32 serdes_cfg, expected_sg_dig_ctrl;
4777         int workaround, port_a;
4778         int current_link_up;
4779
4780         serdes_cfg = 0;
4781         expected_sg_dig_ctrl = 0;
4782         workaround = 0;
4783         port_a = 1;
4784         current_link_up = 0;
4785
4786         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4787             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4788                 workaround = 1;
4789                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4790                         port_a = 0;
4791
4792                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4793                 /* preserve bits 20-23 for voltage regulator */
4794                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4795         }
4796
4797         sg_dig_ctrl = tr32(SG_DIG_CTRL);
4798
4799         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4800                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4801                         if (workaround) {
4802                                 u32 val = serdes_cfg;
4803
4804                                 if (port_a)
4805                                         val |= 0xc010000;
4806                                 else
4807                                         val |= 0x4010000;
4808                                 tw32_f(MAC_SERDES_CFG, val);
4809                         }
4810
4811                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4812                 }
4813                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4814                         tg3_setup_flow_control(tp, 0, 0);
4815                         current_link_up = 1;
4816                 }
4817                 goto out;
4818         }
4819
4820         /* Want auto-negotiation.  */
4821         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4822
4823         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4824         if (flowctrl & ADVERTISE_1000XPAUSE)
4825                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4826         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4827                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4828
4829         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4830                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4831                     tp->serdes_counter &&
4832                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
4833                                     MAC_STATUS_RCVD_CFG)) ==
4834                      MAC_STATUS_PCS_SYNCED)) {
4835                         tp->serdes_counter--;
4836                         current_link_up = 1;
4837                         goto out;
4838                 }
4839 restart_autoneg:
4840                 if (workaround)
4841                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4842                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4843                 udelay(5);
4844                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4845
4846                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4847                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4848         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4849                                  MAC_STATUS_SIGNAL_DET)) {
4850                 sg_dig_status = tr32(SG_DIG_STATUS);
4851                 mac_status = tr32(MAC_STATUS);
4852
4853                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4854                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
4855                         u32 local_adv = 0, remote_adv = 0;
4856
4857                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4858                                 local_adv |= ADVERTISE_1000XPAUSE;
4859                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4860                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4861
4862                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4863                                 remote_adv |= LPA_1000XPAUSE;
4864                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4865                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4866
4867                         tp->link_config.rmt_adv =
4868                                            mii_adv_to_ethtool_adv_x(remote_adv);
4869
4870                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4871                         current_link_up = 1;
4872                         tp->serdes_counter = 0;
4873                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4874                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4875                         if (tp->serdes_counter)
4876                                 tp->serdes_counter--;
4877                         else {
4878                                 if (workaround) {
4879                                         u32 val = serdes_cfg;
4880
4881                                         if (port_a)
4882                                                 val |= 0xc010000;
4883                                         else
4884                                                 val |= 0x4010000;
4885
4886                                         tw32_f(MAC_SERDES_CFG, val);
4887                                 }
4888
4889                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4890                                 udelay(40);
4891
4892                                 /* Link parallel detection - link is up */
4893                                 /* only if we have PCS_SYNC and not */
4894                                 /* receiving config code words */
4895                                 mac_status = tr32(MAC_STATUS);
4896                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4897                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4898                                         tg3_setup_flow_control(tp, 0, 0);
4899                                         current_link_up = 1;
4900                                         tp->phy_flags |=
4901                                                 TG3_PHYFLG_PARALLEL_DETECT;
4902                                         tp->serdes_counter =
4903                                                 SERDES_PARALLEL_DET_TIMEOUT;
4904                                 } else
4905                                         goto restart_autoneg;
4906                         }
4907                 }
4908         } else {
4909                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4910                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4911         }
4912
4913 out:
4914         return current_link_up;
4915 }
4916
/* Drive link setup for fiber (TBI) ports whose autonegotiation is run
 * in software rather than by the MAC's hardware autoneg engine.
 *
 * @tp:         device private state
 * @mac_status: snapshot of the MAC_STATUS register taken by the caller
 *
 * Returns 1 if the link should be considered up, 0 otherwise.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* Without PCS sync there is no usable signal; report link down. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			/* Translate the negotiated config words into
			 * MII-style 1000BASE-X pause advertisement bits
			 * for the flow control resolution below.
			 */
			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		/* Ack the sync/config-changed attention bits until they
		 * stay clear; give up after 30 iterations (~1.8 ms).
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		mac_status = tr32(MAC_STATUS);
		/* Autoneg did not complete, but we have PCS sync and are
		 * not receiving config code words: treat the link as up
		 * via parallel detection.
		 */
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		/* Pulse SEND_CONFIGS so the partner sees us, then restore
		 * the normal MAC mode.
		 */
		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
4981
/* Bring up / re-check the link on a fiber (TBI) port.  Dispatches to
 * hardware autoneg or the software state machine, then updates carrier
 * state, LEDs and the cached active speed/duplex.  Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Remember the current link parameters so a change can still be
	 * reported even when the carrier state itself does not flip.
	 */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: link is already up and stable (synced, signal
	 * present, no pending config), so just ack and return.
	 */
	if (!tg3_flag(tp, HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Put the MAC into TBI port mode for the fiber interface. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	tp->link_config.rmt_adv = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear any stale link-change indication in the status block. */
	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack lingering attention bits until they stay clear. */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		/* Autoneg timed out: pulse SEND_CONFIGS to nudge the
		 * link partner into restarting negotiation.
		 */
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		/* Fiber links here are always 1000 Mb/s full duplex. */
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		/* Carrier unchanged, but still report if flow control,
		 * speed or duplex changed underneath it.
		 */
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
5090
/* Link setup for fiber ports attached through an MII SERDES PHY.
 * Returns 0 on success, or the OR of PHY access error codes
 * accumulated along the way.
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack all pending link-related attention bits. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;
	tp->link_config.rmt_adv = 0;

	/* BMSR link status is latched-low; read twice to get the
	 * current state rather than a stale latched value.
	 */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		/* On 5714 trust the MAC's TX status for link state
		 * instead of the PHY's BMSR link bit.
		 */
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, newadv;

		/* Rebuild the 1000BASE-X advertisement from the
		 * requested flow control and advertising masks.
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				 ADVERTISE_1000XPAUSE |
				 ADVERTISE_1000XPSE_ASYM |
				 ADVERTISE_SLCT);

		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);

		/* (Re)start autoneg if the advertisement changed or
		 * autoneg is not currently enabled on the PHY.
		 */
		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, newadv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: disable autoneg and set requested duplex. */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Withdraw the advertisement and restart
				 * autoneg so the partner drops the link.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Latched-low BMSR: read twice for fresh status. */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			/* Resolve duplex from the overlap of both sides'
			 * advertisements.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = 0;
			}
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	/* NOTE(review): this tests the previously active duplex, not the
	 * freshly resolved current_duplex (which is cached further down);
	 * appears intentional but worth confirming against later driver
	 * revisions.
	 */
	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
5262
/* Periodic watchdog for MII SERDES ports: fall back to parallel
 * detection when autonegotiation stalls, and re-enable autoneg once
 * the link partner starts sending config code words again.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
					 MII_TG3_DSP_EXP1_INT_STAT);
			/* Read twice; first read clears latched state. */
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				/* Force 1000FD and remember we are in
				 * parallel-detect mode.
				 */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (netif_carrier_ok(tp->dev) &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				 MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

		}
	}
}
5322
/* Top-level link setup entry point.  Dispatches to the PHY-type
 * specific handler, then reprograms the GRC timer prescaler, TX
 * timing, statistics coalescing and the ASPM workaround to match
 * the resulting link state.  Returns the handler's error code.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		u32 scale;

		/* Match the GRC timer prescaler to the current MAC
		 * core clock frequency.
		 */
		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	/* 1000/half uses the larger slot time; everything else the
	 * standard one.
	 */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Only coalesce statistics while the link is up. */
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		val = tr32(PCIE_PWR_MGMT_THRESH);
		/* Use the configured L1 entry threshold while the link
		 * is down; saturate it while the link is up.
		 */
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
5387
5388 static inline int tg3_irq_sync(struct tg3 *tp)
5389 {
5390         return tp->irq_sync;
5391 }
5392
5393 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5394 {
5395         int i;
5396
5397         dst = (u32 *)((u8 *)dst + off);
5398         for (i = 0; i < len; i += sizeof(u32))
5399                 *dst++ = tr32(off + i);
5400 }
5401
/* Snapshot the legacy (non-PCI-Express) register blocks into @regs
 * for tg3_dump_state().  Each block lands at its own hardware offset
 * within the buffer (see tg3_rd32_loop), so unread gaps stay zero.
 */
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	/* Per-vector coalescing registers only exist with MSI-X. */
	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	/* The separate TX CPU is absent on 5705 and later chips. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
5451
/* Dump device registers and per-vector status-block/NAPI state to the
 * kernel log for debugging.  The register buffer uses GFP_ATOMIC so
 * the allocation never sleeps; on allocation failure only an error is
 * logged and the dump is skipped.
 */
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs) {
		netdev_err(tp->dev, "Failed allocating register dump buffer\n");
		return;
	}

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	/* Print four words per line, skipping lines that are all zero
	 * (zeroed gaps between register blocks).
	 */
	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	/* Per interrupt vector: hardware status block, then the driver's
	 * NAPI bookkeeping for the same vector.
	 */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
5509
5510 /* This is called whenever we suspect that the system chipset is re-
5511  * ordering the sequence of MMIO to the tx send mailbox. The symptom
5512  * is bogus tx completions. We try to recover by setting the
5513  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5514  * in the workqueue.
5515  */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* If the reorder workaround is already active, or TX mailbox
	 * writes already go through the indirect path, reordering should
	 * be impossible -- treat reaching here as a fatal driver bug.
	 */
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	/* Only flag the pending recovery here; the chip reset itself
	 * happens later in the workqueue.
	 */
	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
}
5531
5532 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5533 {
5534         /* Tell compiler to fetch tx indices from memory. */
5535         barrier();
5536         return tnapi->tx_pending -
5537                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5538 }
5539
5540 /* Tigon3 never reports partial packet sends.  So we do not
5541  * need special logic to handle SKBs that have not had all
5542  * of their frags sent yet, like SunGEM does.
5543  */
/* Reclaim completed tx descriptors between our consumer index and the
 * hardware's, unmapping DMA and freeing the skbs.  Any inconsistency in
 * the ring (NULL skb, frag overrun) triggers tg3_tx_recover().
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	/* With TSS, napi[0] has no tx ring, so the queue index is off by one. */
	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completion for an empty slot means the hw index is
		 * bogus (likely MMIO reordering) -- recover via reset.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		/* Unmap the linear part of the skb. */
		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		/* Skip over extra BDs used when a mapping crossed a
		 * DMA boundary and had to be split.
		 */
		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		/* Unmap each page fragment; note overruns as tx_bug. */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		pkts_compl++;
		bytes_compl += skb->len;

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	/* Report completed work to the BQL layer. */
	netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under the tx lock to avoid racing with the xmit path
	 * when deciding whether to wake the queue.
	 */
	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
5633
5634 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5635 {
5636         if (!ri->data)
5637                 return;
5638
5639         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5640                          map_sz, PCI_DMA_FROMDEVICE);
5641         kfree(ri->data);
5642         ri->data = NULL;
5643 }
5644
5645 /* Returns size of skb allocated or < 0 on error.
5646  *
5647  * We only need to fill in the address because the other members
5648  * of the RX descriptor are invariant, see tg3_init_rings.
5649  *
5650  * Note the purposeful assymetry of cpu vs. chip accesses.  For
5651  * posting buffers we only dirty the first cache line of the RX
5652  * descriptor (containing the address).  Whereas for the RX status
5653  * buffers the cpu only reads the last cacheline of the RX descriptor
5654  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5655  */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			    u32 opaque_key, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;

	/* Select descriptor, shadow entry and DMA map size based on
	 * which producer ring (standard or jumbo) is being replenished.
	 */
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	/* Total allocation includes headroom plus space for the
	 * skb_shared_info so the buffer can later feed build_skb().
	 */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc(skb_size, GFP_ATOMIC);
	if (!data)
		return -ENOMEM;

	/* Map only the packet area past the headroom. */
	mapping = pci_map_single(tp->pdev,
				 data + TG3_RX_OFFSET(tp),
				 data_size,
				 PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping)) {
		kfree(data);
		return -EIO;
	}

	/* Commit: record buffer and mapping, then give the chip the
	 * DMA address.  Only addr_{hi,lo} change; the rest of the
	 * descriptor is invariant (see comment above).
	 */
	map->data = data;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	/* Returns the DMA-mapped size (not the full allocation). */
	return data_size;
}
5713
5714 /* We only need to move over in the address because the other
5715  * members of the RX descriptor are invariant.  See notes above
5716  * tg3_alloc_rx_data for full details.
5717  */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	/* Buffers always originate from napi[0]'s producer rings. */
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		/* Unknown ring key: nothing to recycle. */
		return;
	}

	/* Move ownership of the buffer and its DMA mapping to the
	 * destination slot.
	 */
	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	/* Clearing data marks the source slot empty for prodring_xfer. */
	src_map->data = NULL;
}
5763
5764 /* The RX ring scheme is composed of multiple rings which post fresh
5765  * buffers to the chip, and one special ring the chip uses to report
5766  * status back to the host.
5767  *
5768  * The special ring reports the status of received packets to the
5769  * host.  The chip does not write into the original descriptor the
5770  * RX buffer was obtained from.  The chip simply takes the original
5771  * descriptor as provided by the host, updates the status and length
5772  * field, then writes this into the next status ring entry.
5773  *
5774  * Each ring the host uses to post buffers to the chip is described
5775  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
5776  * it is first placed into the on-chip ram.  When the packet's length
5777  * is known, it walks down the TG3_BDINFO entries to select the ring.
5778  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
5779  * which is within the range of the new packet's length is chosen.
5780  *
5781  * The "separate ring for rx status" scheme may sound queer, but it makes
5782  * sense from a cache coherency perspective.  If only the host writes
5783  * to the buffer post rings, and only the chip writes to the rx status
5784  * rings, then cache lines never move beyond shared-modified state.
5785  * If both the host and chip were to write into the same ring, cache line
5786  * eviction could occur since both entities want it in an exclusive state.
5787  */
/* Process up to @budget packets from this vector's rx return ring.
 * Large packets hand their buffer straight to the stack via build_skb()
 * and post a fresh buffer; small packets are copied into a new skb and
 * the original buffer is recycled.  Returns the number of packets
 * delivered to the stack.
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	/* Work on local copies of the producer indices; they are
	 * written back (masked) only when posting to the chip.
	 */
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;
		u8 *data;

		/* The opaque cookie identifies the producer ring and the
		 * slot the buffer came from.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		/* Drop errored frames (odd-nibble MII errors excepted). */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		prefetch(data + TG3_RX_OFFSET(tp));
		/* Hardware length includes the FCS; strip it. */
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;

			/* Post a replacement buffer first; only then is it
			 * safe to hand the current one to the stack.
			 */
			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
						    *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			/* Zero-copy: wrap the existing buffer in an skb. */
			skb = build_skb(data);
			if (!skb) {
				kfree(data);
				goto drop_it_no_recycle;
			}
			skb_reserve(skb, TG3_RX_OFFSET(tp));
			/* Ensure that the update to the data happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->data = NULL;

		} else {
			/* Small packet: recycle the hw buffer and copy the
			 * payload into a freshly allocated skb.
			 */
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			skb = netdev_alloc_skb(tp->dev,
					       len + TG3_RAW_IP_ALIGN);
			if (skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(skb->data,
			       data + TG3_RX_OFFSET(tp),
			       len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
		}

		skb_put(skb, len);
		/* Trust hardware checksum only when it verified TCP/UDP
		 * and the folded result is 0xffff.
		 */
		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		/* Drop oversized frames unless VLAN-tagged. */
		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb,
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically flush std producer updates to the chip so
		 * it never starves for buffers within a long poll.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		/* Single-queue: this vector posts directly to the chip. */
		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		/* RSS: napi[1] gathers all vectors' buffers and posts them
		 * (see tg3_poll_work), so kick it if we are another vector.
		 */
		if (tnapi != &tp->napi[1])
			napi_schedule(&tp->napi[1].napi);
	}

	return received;
}
5974
/* Check the status block for a link change event and, if present, ack
 * it and run the link state machine.  Skipped entirely when link state
 * is tracked via the link change register or serdes polling.
 */
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Clear the link-change bit but keep the block
			 * marked as updated.
			 */
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				/* phylib owns the PHY: just ack the MAC
				 * status bits so the interrupt clears.
				 */
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}
}
5998
/* Move freshly recycled rx buffers from a source producer ring set
 * (@spr, owned by another vector) into the destination set (@dpr, the
 * one the chip consumes from).  Handles the standard ring first, then
 * the jumbo ring, with identical logic.  Returns 0 on success or
 * -ENOSPC if a destination slot was still occupied (caller then pokes
 * the coalescing engine to retry later).
 */
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		/* Copy only the contiguous run up to the ring wrap point;
		 * the outer loop picks up the remainder.
		 */
		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		/* Also bounded by the destination's wrap point. */
		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		/* Stop at the first destination slot that still holds a
		 * buffer; report the shortfall via -ENOSPC.
		 */
		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		/* Transfer buffer ownership, then the DMA addresses. */
		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	/* Same transfer logic for the jumbo ring. */
	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
6124
/* One unit of NAPI work for a vector: reap tx completions, process rx
 * within the remaining budget, and -- on the RSS gather vector
 * (napi[1]) -- collect recycled buffers from all vectors and post them
 * to the chip.  Returns the updated work_done count.
 */
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		/* A pending recovery means the chip is about to be reset;
		 * abandon further work.
		 */
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		/* Snapshot the indices so we only touch the mailboxes
		 * that actually changed.
		 */
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		for (i = 1; i < tp->irq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		/* Buffer transfers must be visible before the producer
		 * mailbox writes below.
		 */
		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		/* -ENOSPC from the xfer: kick the coalescing engine so we
		 * get polled again to finish the transfer.
		 */
		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
6171
6172 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6173 {
6174         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6175                 schedule_work(&tp->reset_task);
6176 }
6177
static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	/* Wait for any queued or running reset task to finish, then clear
	 * the latch so a future tg3_reset_task_schedule() can requeue it.
	 */
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
6183
/* NAPI poll handler for MSI-X vectors other than vector 0 (tagged
 * status mode; no link handling here -- that is vector 0's job).
 */
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
			napi_complete(napi);
			/* Reenable interrupts. */
			/* Writing the tag acks processed work and unmasks
			 * this vector's interrupt in one mailbox write.
			 */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
6227
/* Inspect error-attention registers after SD_STATUS_ERROR is seen.
 * Benign conditions (mbuf low-watermark, MSI request) are filtered
 * out; anything else dumps state and schedules a chip reset.  Runs at
 * most once per error episode via the ERROR_PROCESSED flag.
 */
static void tg3_process_error(struct tg3 *tp)
{
	u32 val;
	bool real_error = false;

	if (tg3_flag(tp, ERROR_PROCESSED))
		return;

	/* Check Flow Attention register */
	val = tr32(HOSTCC_FLOW_ATTN);
	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (!real_error)
		return;

	tg3_dump_state(tp);

	tg3_flag_set(tp, ERROR_PROCESSED);
	tg3_reset_task_schedule(tp);
}
6261
/* NAPI poll handler for vector 0 (and the only vector in non-MSI-X
 * setups).  Also responsible for error processing and link events.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			/* Untagged mode: ack the status block directly. */
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
6309
6310 static void tg3_napi_disable(struct tg3 *tp)
6311 {
6312         int i;
6313
6314         for (i = tp->irq_cnt - 1; i >= 0; i--)
6315                 napi_disable(&tp->napi[i].napi);
6316 }
6317
6318 static void tg3_napi_enable(struct tg3 *tp)
6319 {
6320         int i;
6321
6322         for (i = 0; i < tp->irq_cnt; i++)
6323                 napi_enable(&tp->napi[i].napi);
6324 }
6325
6326 static void tg3_napi_init(struct tg3 *tp)
6327 {
6328         int i;
6329
6330         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6331         for (i = 1; i < tp->irq_cnt; i++)
6332                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6333 }
6334
6335 static void tg3_napi_fini(struct tg3 *tp)
6336 {
6337         int i;
6338
6339         for (i = 0; i < tp->irq_cnt; i++)
6340                 netif_napi_del(&tp->napi[i].napi);
6341 }
6342
/* Quiesce the data path: refresh the TX watchdog timestamp so the
 * stack does not declare a timeout while the device is stopped, then
 * halt NAPI polling and the TX queues.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
        tp->dev->trans_start = jiffies; /* prevent tx timeout */
        tg3_napi_disable(tp);
        netif_tx_disable(tp->dev);
}
6349
/* Restart the data path after tg3_netif_stop(): wake TX queues,
 * re-enable NAPI, and let the chip interrupt us again.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
        /* NOTE: unconditional netif_tx_wake_all_queues is only
         * appropriate so long as all callers are assured to
         * have free tx slots (such as after tg3_init_hw)
         */
        netif_tx_wake_all_queues(tp->dev);

        tg3_napi_enable(tp);
        /* Mark the status block as updated so the first poll performs a
         * full pass before interrupts are re-enabled.
         */
        tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
        tg3_enable_ints(tp);
}
6362
/* Stop new interrupt work and wait for all in-flight handlers to
 * finish.  The ISRs check tp->irq_sync (via tg3_irq_sync()) and bail
 * out once it is set.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
        int i;

        BUG_ON(tp->irq_sync);

        tp->irq_sync = 1;
        /* Ensure irq_sync is globally visible before waiting for the
         * handlers to drain.
         */
        smp_mb();

        for (i = 0; i < tp->irq_cnt; i++)
                synchronize_irq(tp->napi[i].irq_vec);
}
6375
6376 /* Fully shutdown all tg3 driver activity elsewhere in the system.
6377  * If irq_sync is non-zero, then the IRQ handler must be synchronized
6378  * with as well.  Most of the time, this is not necessary except when
6379  * shutting down the device.
6380  */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
        spin_lock_bh(&tp->lock);
        /* Optionally also drain the IRQ handlers (see comment above). */
        if (irq_sync)
                tg3_irq_quiesce(tp);
}
6387
/* Counterpart to tg3_full_lock().  Note that irq_sync is cleared
 * elsewhere (not visible in this function).
 */
static inline void tg3_full_unlock(struct tg3 *tp)
{
        spin_unlock_bh(&tp->lock);
}
6392
6393 /* One-shot MSI handler - Chip automatically disables interrupt
6394  * after sending MSI so driver doesn't have to do it.
6395  */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
        struct tg3_napi *tnapi = dev_id;
        struct tg3 *tp = tnapi->tp;

        /* Warm the cachelines the poll routine will touch first. */
        prefetch(tnapi->hw_status);
        if (tnapi->rx_rcb)
                prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

        /* Do not schedule NAPI while the driver is quiescing IRQs. */
        if (likely(!tg3_irq_sync(tp)))
                napi_schedule(&tnapi->napi);

        return IRQ_HANDLED;
}
6410
6411 /* MSI ISR - No need to check for interrupt sharing and no need to
6412  * flush status block and interrupt mailbox. PCI ordering rules
6413  * guarantee that MSI will arrive after the status block.
6414  */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
        struct tg3_napi *tnapi = dev_id;
        struct tg3 *tp = tnapi->tp;

        /* Warm the cachelines the poll routine will touch first. */
        prefetch(tnapi->hw_status);
        if (tnapi->rx_rcb)
                prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
        /*
         * Writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
         * Writing non-zero to intr-mbox-0 additional tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         * event coalescing.
         */
        tw32_mailbox(tnapi->int_mbox, 0x00000001);
        if (likely(!tg3_irq_sync(tp)))
                napi_schedule(&tnapi->napi);

        /* IRQ_RETVAL(1) == IRQ_HANDLED */
        return IRQ_RETVAL(1);
}
6436
/* Legacy INTx interrupt handler (non-tagged status mode). */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
        struct tg3_napi *tnapi = dev_id;
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int handled = 1;

        /* In INTx mode, it is possible for the interrupt to arrive at
         * the CPU before the status block posted prior to the interrupt.
         * Reading the PCI State register will confirm whether the
         * interrupt is ours and will flush the status block.
         */
        if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
                if (tg3_flag(tp, CHIP_RESETTING) ||
                    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                        handled = 0;
                        goto out;
                }
        }

        /*
         * Writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
         * Writing non-zero to intr-mbox-0 additional tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         * event coalescing.
         *
         * Flush the mailbox to de-assert the IRQ immediately to prevent
         * spurious interrupts.  The flush impacts performance but
         * excessive spurious interrupts can be worse in some cases.
         */
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
        if (tg3_irq_sync(tp))
                goto out;
        sblk->status &= ~SD_STATUS_UPDATED;
        if (likely(tg3_has_work(tnapi))) {
                prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
                napi_schedule(&tnapi->napi);
        } else {
                /* No work, shared interrupt perhaps?  re-enable
                 * interrupts, and flush that PCI write
                 */
                tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                               0x00000000);
        }
out:
        return IRQ_RETVAL(handled);
}
6485
/* Legacy INTx interrupt handler for chips running in tagged-status
 * mode: the status-block tag, rather than the SD_STATUS_UPDATED bit,
 * indicates whether this interrupt carries new work.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
        struct tg3_napi *tnapi = dev_id;
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int handled = 1;

        /* In INTx mode, it is possible for the interrupt to arrive at
         * the CPU before the status block posted prior to the interrupt.
         * Reading the PCI State register will confirm whether the
         * interrupt is ours and will flush the status block.
         */
        if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
                if (tg3_flag(tp, CHIP_RESETTING) ||
                    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                        handled = 0;
                        goto out;
                }
        }

        /*
         * writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
         * writing non-zero to intr-mbox-0 additional tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         * event coalescing.
         *
         * Flush the mailbox to de-assert the IRQ immediately to prevent
         * spurious interrupts.  The flush impacts performance but
         * excessive spurious interrupts can be worse in some cases.
         */
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

        /*
         * In a shared interrupt configuration, sometimes other devices'
         * interrupts will scream.  We record the current status tag here
         * so that the above check can report that the screaming interrupts
         * are unhandled.  Eventually they will be silenced.
         */
        tnapi->last_irq_tag = sblk->status_tag;

        if (tg3_irq_sync(tp))
                goto out;

        prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

        napi_schedule(&tnapi->napi);

out:
        return IRQ_RETVAL(handled);
}
6537
6538 /* ISR for interrupt test */
6539 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6540 {
6541         struct tg3_napi *tnapi = dev_id;
6542         struct tg3 *tp = tnapi->tp;
6543         struct tg3_hw_status *sblk = tnapi->hw_status;
6544
6545         if ((sblk->status & SD_STATUS_UPDATED) ||
6546             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6547                 tg3_disable_ints(tp);
6548                 return IRQ_RETVAL(1);
6549         }
6550         return IRQ_RETVAL(0);
6551 }
6552
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: manually invoke the interrupt handler for
 * every vector, since real interrupts may be disabled.
 */
static void tg3_poll_controller(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        int vec = 0;

        while (vec < tp->irq_cnt) {
                tg3_interrupt(tp->napi[vec].irq_vec, &tp->napi[vec]);
                vec++;
        }
}
#endif
6563
/* ndo_tx_timeout handler: optionally log chip state, then schedule a
 * full reset from process context.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        if (netif_msg_tx_err(tp)) {
                netdev_err(dev, "transmit timed out, resetting\n");
                tg3_dump_state(tp);
        }

        tg3_reset_task_schedule(tp);
}
6575
6576 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6577 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6578 {
6579         u32 base = (u32) mapping & 0xffffffff;
6580
6581         return (base > 0xffffdcc0) && (base + len + 8 < base);
6582 }
6583
6584 /* Test for DMA addresses > 40-bit */
6585 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6586                                           int len)
6587 {
6588 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6589         if (tg3_flag(tp, 40BIT_DMA_BUG))
6590                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6591         return 0;
6592 #else
6593         return 0;
6594 #endif
6595 }
6596
6597 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6598                                  dma_addr_t mapping, u32 len, u32 flags,
6599                                  u32 mss, u32 vlan)
6600 {
6601         txbd->addr_hi = ((u64) mapping >> 32);
6602         txbd->addr_lo = ((u64) mapping & 0xffffffff);
6603         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6604         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6605 }
6606
/* Queue one DMA segment onto the TX ring, splitting it into multiple
 * descriptors when the chip's per-descriptor DMA limit requires it.
 * *entry and *budget are advanced/consumed as descriptors are used.
 * Returns true if a hardware DMA bug condition was hit and the caller
 * must fall back to the bounce-buffer workaround.
 */
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
                            dma_addr_t map, u32 len, u32 flags,
                            u32 mss, u32 vlan)
{
        struct tg3 *tp = tnapi->tp;
        bool hwbug = false;

        /* Segments of 8 bytes or less trip a DMA erratum on some chips. */
        if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
                hwbug = true;

        if (tg3_4g_overflow_test(map, len))
                hwbug = true;

        if (tg3_40bit_overflow_test(tp, map, len))
                hwbug = true;

        if (tp->dma_limit) {
                u32 prvidx = *entry;
                /* Only the final descriptor of the split may carry END. */
                u32 tmp_flag = flags & ~TXD_FLAG_END;
                while (len > tp->dma_limit && *budget) {
                        u32 frag_len = tp->dma_limit;
                        len -= tp->dma_limit;

                        /* Avoid the 8byte DMA problem */
                        if (len <= 8) {
                                len += tp->dma_limit / 2;
                                frag_len = tp->dma_limit / 2;
                        }

                        tnapi->tx_buffers[*entry].fragmented = true;

                        tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
                                      frag_len, tmp_flag, mss, vlan);
                        *budget -= 1;
                        prvidx = *entry;
                        *entry = NEXT_TX(*entry);

                        map += frag_len;
                }

                if (len) {
                        if (*budget) {
                                tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
                                              len, flags, mss, vlan);
                                *budget -= 1;
                                *entry = NEXT_TX(*entry);
                        } else {
                                /* Ran out of descriptors mid-split; undo the
                                 * fragmented mark on the last one written.
                                 */
                                hwbug = true;
                                tnapi->tx_buffers[prvidx].fragmented = false;
                        }
                }
        } else {
                tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
                              len, flags, mss, vlan);
                *entry = NEXT_TX(*entry);
        }

        return hwbug;
}
6666
/* Unmap the DMA mappings of a queued skb, starting at ring slot
 * 'entry'.  'last' is the index of the final page fragment to unmap
 * (-1 when only the linear head was mapped).  Slots marked
 * 'fragmented' by tg3_tx_frag_set() are skipped over, since they
 * share the mapping of the slot that precedes them.
 */
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
        int i;
        struct sk_buff *skb;
        struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

        skb = txb->skb;
        txb->skb = NULL;

        /* Linear portion of the skb first. */
        pci_unmap_single(tnapi->tp->pdev,
                         dma_unmap_addr(txb, mapping),
                         skb_headlen(skb),
                         PCI_DMA_TODEVICE);

        while (txb->fragmented) {
                txb->fragmented = false;
                entry = NEXT_TX(entry);
                txb = &tnapi->tx_buffers[entry];
        }

        for (i = 0; i <= last; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                entry = NEXT_TX(entry);
                txb = &tnapi->tx_buffers[entry];

                pci_unmap_page(tnapi->tp->pdev,
                               dma_unmap_addr(txb, mapping),
                               skb_frag_size(frag), PCI_DMA_TODEVICE);

                while (txb->fragmented) {
                        txb->fragmented = false;
                        entry = NEXT_TX(entry);
                        txb = &tnapi->tx_buffers[entry];
                }
        }
}
6704
6705 /* Workaround 4GB and 40-bit hardware DMA bugs. */
/* Workaround 4GB and 40-bit hardware DMA bugs: copy the skb into a
 * freshly-allocated linear buffer and queue that instead.  On success
 * *pskb is replaced with the new skb and 0 is returned; on any
 * failure the original skb is freed and -1 is returned (the caller
 * drops the packet).
 */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
                                       struct sk_buff **pskb,
                                       u32 *entry, u32 *budget,
                                       u32 base_flags, u32 mss, u32 vlan)
{
        struct tg3 *tp = tnapi->tp;
        struct sk_buff *new_skb, *skb = *pskb;
        dma_addr_t new_addr = 0;
        int ret = 0;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
                new_skb = skb_copy(skb, GFP_ATOMIC);
        else {
                /* 5701 needs the data 4-byte aligned; grow the headroom
                 * enough to realign it.
                 */
                int more_headroom = 4 - ((unsigned long)skb->data & 3);

                new_skb = skb_copy_expand(skb,
                                          skb_headroom(skb) + more_headroom,
                                          skb_tailroom(skb), GFP_ATOMIC);
        }

        if (!new_skb) {
                ret = -1;
        } else {
                /* New SKB is guaranteed to be linear. */
                new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
                                          PCI_DMA_TODEVICE);
                /* Make sure the mapping succeeded */
                if (pci_dma_mapping_error(tp->pdev, new_addr)) {
                        dev_kfree_skb(new_skb);
                        ret = -1;
                } else {
                        u32 save_entry = *entry;

                        base_flags |= TXD_FLAG_END;

                        tnapi->tx_buffers[*entry].skb = new_skb;
                        dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
                                           mapping, new_addr);

                        if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
                                            new_skb->len, base_flags,
                                            mss, vlan)) {
                                /* Even the copy hit a DMA bug; unwind. */
                                tg3_tx_skb_unmap(tnapi, save_entry, -1);
                                dev_kfree_skb(new_skb);
                                ret = -1;
                        }
                }
        }

        /* The original skb is consumed in every outcome. */
        dev_kfree_skb(skb);
        *pskb = new_skb;
        return ret;
}
6759
6760 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6761
6762 /* Use GSO to workaround a rare TSO bug that may be triggered when the
6763  * TSO header is greater than 80 bytes.
6764  */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
        struct sk_buff *segs, *nskb;
        /* Worst-case descriptor need: roughly 3 per resulting segment. */
        u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

        /* Estimate the number of fragments in the worst case */
        if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
                netif_stop_queue(tp->dev);

                /* netif_tx_stop_queue() must be done before checking
                 * checking tx index in tg3_tx_avail() below, because in
                 * tg3_tx(), we update tx index before checking for
                 * netif_tx_queue_stopped().
                 */
                smp_mb();
                if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
                        return NETDEV_TX_BUSY;

                /* Space freed up while we were stopping; resume. */
                netif_wake_queue(tp->dev);
        }

        /* Perform the segmentation in software and transmit each
         * resulting MTU-sized skb individually.
         */
        segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
        if (IS_ERR(segs))
                goto tg3_tso_bug_end;

        do {
                nskb = segs;
                segs = segs->next;
                nskb->next = NULL;
                tg3_start_xmit(nskb, tp->dev);
        } while (segs);

tg3_tso_bug_end:
        dev_kfree_skb(skb);

        return NETDEV_TX_OK;
}
6802
6803 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6804  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6805  */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        u32 len, entry, base_flags, mss, vlan = 0;
        u32 budget;
        int i = -1, would_hit_hwbug;
        dma_addr_t mapping;
        struct tg3_napi *tnapi;
        struct netdev_queue *txq;
        unsigned int last;

        txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
        tnapi = &tp->napi[skb_get_queue_mapping(skb)];
        /* With TSS, TX rings start at vector 1 (vector 0 is RX-only). */
        if (tg3_flag(tp, ENABLE_TSS))
                tnapi++;

        budget = tg3_tx_avail(tnapi);

        /* We are running in BH disabled context with netif_tx_lock
         * and TX reclaim runs via tp->napi.poll inside of a software
         * interrupt.  Furthermore, IRQ processing runs lockless so we have
         * no IRQ context deadlocks to worry about either.  Rejoice!
         */
        if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
                if (!netif_tx_queue_stopped(txq)) {
                        netif_tx_stop_queue(txq);

                        /* This is a hard error, log it. */
                        netdev_err(dev,
                                   "BUG! Tx Ring full when queue awake!\n");
                }
                return NETDEV_TX_BUSY;
        }

        entry = tnapi->tx_prod;
        base_flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;

        mss = skb_shinfo(skb)->gso_size;
        if (mss) {
                struct iphdr *iph;
                u32 tcp_opt_len, hdr_len;

                /* The TSO setup below writes into the headers, so we need
                 * a private copy if they are shared.
                 */
                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
                        goto drop;

                iph = ip_hdr(skb);
                tcp_opt_len = tcp_optlen(skb);

                hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;

                if (!skb_is_gso_v6(skb)) {
                        iph->check = 0;
                        iph->tot_len = htons(mss + hdr_len);
                }

                /* Some chips mishandle TSO headers longer than 80 bytes;
                 * segment in software instead.
                 */
                if (unlikely((ETH_HLEN + hdr_len) > 80) &&
                    tg3_flag(tp, TSO_BUG))
                        return tg3_tso_bug(tp, skb);

                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);

                if (tg3_flag(tp, HW_TSO_1) ||
                    tg3_flag(tp, HW_TSO_2) ||
                    tg3_flag(tp, HW_TSO_3)) {
                        tcp_hdr(skb)->check = 0;
                        base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
                } else
                        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
                                                                 iph->daddr, 0,
                                                                 IPPROTO_TCP,
                                                                 0);

                /* Each hardware TSO generation encodes the header length
                 * into the mss/flags fields differently.
                 */
                if (tg3_flag(tp, HW_TSO_3)) {
                        mss |= (hdr_len & 0xc) << 12;
                        if (hdr_len & 0x10)
                                base_flags |= 0x00000010;
                        base_flags |= (hdr_len & 0x3e0) << 5;
                } else if (tg3_flag(tp, HW_TSO_2))
                        mss |= hdr_len << 9;
                else if (tg3_flag(tp, HW_TSO_1) ||
                         GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                        if (tcp_opt_len || iph->ihl > 5) {
                                int tsflags;

                                tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
                                mss |= (tsflags << 11);
                        }
                } else {
                        if (tcp_opt_len || iph->ihl > 5) {
                                int tsflags;

                                tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
                                base_flags |= tsflags << 12;
                        }
                }
        }

        if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
            !mss && skb->len > VLAN_ETH_FRAME_LEN)
                base_flags |= TXD_FLAG_JMB_PKT;

        if (vlan_tx_tag_present(skb)) {
                base_flags |= TXD_FLAG_VLAN;
                vlan = vlan_tx_tag_get(skb);
        }

        len = skb_headlen(skb);

        mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
        if (pci_dma_mapping_error(tp->pdev, mapping))
                goto drop;


        tnapi->tx_buffers[entry].skb = skb;
        dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

        would_hit_hwbug = 0;

        if (tg3_flag(tp, 5701_DMA_BUG))
                would_hit_hwbug = 1;

        /* Queue the linear portion, then each page fragment. */
        if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
                          ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
                            mss, vlan)) {
                would_hit_hwbug = 1;
        } else if (skb_shinfo(skb)->nr_frags > 0) {
                u32 tmp_mss = mss;

                /* Only hardware-TSO chips want mss in every descriptor. */
                if (!tg3_flag(tp, HW_TSO_1) &&
                    !tg3_flag(tp, HW_TSO_2) &&
                    !tg3_flag(tp, HW_TSO_3))
                        tmp_mss = 0;

                /* Now loop through additional data
                 * fragments, and queue them.
                 */
                last = skb_shinfo(skb)->nr_frags - 1;
                for (i = 0; i <= last; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        len = skb_frag_size(frag);
                        mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
                                                   len, DMA_TO_DEVICE);

                        tnapi->tx_buffers[entry].skb = NULL;
                        dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
                                           mapping);
                        if (dma_mapping_error(&tp->pdev->dev, mapping))
                                goto dma_error;

                        if (!budget ||
                            tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
                                            len, base_flags |
                                            ((i == last) ? TXD_FLAG_END : 0),
                                            tmp_mss, vlan)) {
                                would_hit_hwbug = 1;
                                break;
                        }
                }
        }

        if (would_hit_hwbug) {
                /* Back out everything queued so far and retry through the
                 * linearizing bounce-buffer workaround.
                 */
                tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

                /* If the workaround fails due to memory/mapping
                 * failure, silently drop this packet.
                 */
                entry = tnapi->tx_prod;
                budget = tg3_tx_avail(tnapi);
                if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
                                                base_flags, mss, vlan))
                        goto drop_nofree;
        }

        skb_tx_timestamp(skb);
        netdev_sent_queue(tp->dev, skb->len);

        /* Packets are ready, update Tx producer idx local and on card. */
        tw32_tx_mbox(tnapi->prodmbox, entry);

        tnapi->tx_prod = entry;
        if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
                netif_tx_stop_queue(txq);

                /* netif_tx_stop_queue() must be done before checking
                 * checking tx index in tg3_tx_avail() below, because in
                 * tg3_tx(), we update tx index before checking for
                 * netif_tx_queue_stopped().
                 */
                smp_mb();
                if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
                        netif_tx_wake_queue(txq);
        }

        mmiowb();
        return NETDEV_TX_OK;

dma_error:
        /* Unwind the mappings made before the failing fragment. */
        tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
        tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
        dev_kfree_skb(skb);
drop_nofree:
        tp->tx_dropped++;
        return NETDEV_TX_OK;
}
7016
/* Enable or disable internal MAC loopback by rewriting MAC_MODE. */
static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
        if (enable) {
                tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
                                  MAC_MODE_PORT_MODE_MASK);

                tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

                if (!tg3_flag(tp, 5705_PLUS))
                        tp->mac_mode |= MAC_MODE_LINK_POLARITY;

                /* Select MII vs GMII port mode based on PHY speed caps. */
                if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
                        tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
                else
                        tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
        } else {
                tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

                if (tg3_flag(tp, 5705_PLUS) ||
                    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
                        tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
        }

        tw32(MAC_MODE, tp->mac_mode);
        /* Let the MAC settle after the mode change. */
        udelay(40);
}
7044
/* Put the PHY into loopback at the requested speed.  With extlpbk the
 * PHY is configured for external loopback (cable plug); otherwise
 * internal PHY loopback (BMCR_LOOPBACK) is used.  Returns 0 on
 * success or -EIO if external loopback setup fails.
 */
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
        u32 val, bmcr, mac_mode, ptest = 0;

        tg3_phy_toggle_apd(tp, false);
        tg3_phy_toggle_automdix(tp, 0);

        if (extlpbk && tg3_phy_set_extloopbk(tp))
                return -EIO;

        bmcr = BMCR_FULLDPLX;
        switch (speed) {
        case SPEED_10:
                break;
        case SPEED_100:
                bmcr |= BMCR_SPEED100;
                break;
        case SPEED_1000:
        default:
                /* FET PHYs top out at 100Mb; clamp the request. */
                if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                        speed = SPEED_100;
                        bmcr |= BMCR_SPEED100;
                } else {
                        speed = SPEED_1000;
                        bmcr |= BMCR_SPEED1000;
                }
        }

        if (extlpbk) {
                if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
                        /* Force master role for gigabit external loopback. */
                        tg3_readphy(tp, MII_CTRL1000, &val);
                        val |= CTL1000_AS_MASTER |
                               CTL1000_ENABLE_MASTER;
                        tg3_writephy(tp, MII_CTRL1000, val);
                } else {
                        ptest = MII_TG3_FET_PTEST_TRIM_SEL |
                                MII_TG3_FET_PTEST_TRIM_2;
                        tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
                }
        } else
                bmcr |= BMCR_LOOPBACK;

        tg3_writephy(tp, MII_BMCR, bmcr);

        /* The write needs to be flushed for the FETs */
        if (tp->phy_flags & TG3_PHYFLG_IS_FET)
                tg3_readphy(tp, MII_BMCR, &bmcr);

        udelay(40);

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
                tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
                             MII_TG3_FET_PTEST_FRC_TX_LINK |
                             MII_TG3_FET_PTEST_FRC_TX_LOCK);

                /* The write needs to be flushed for the AC131 */
                tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
        }

        /* Reset to prevent losing 1st rx packet intermittently */
        if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
            tg3_flag(tp, 5780_CLASS)) {
                tw32_f(MAC_RX_MODE, RX_MODE_RESET);
                udelay(10);
                tw32_f(MAC_RX_MODE, tp->rx_mode);
        }

        /* Program the MAC port mode to match the selected speed. */
        mac_mode = tp->mac_mode &
                   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
        if (speed == SPEED_1000)
                mac_mode |= MAC_MODE_PORT_MODE_GMII;
        else
                mac_mode |= MAC_MODE_PORT_MODE_MII;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
                u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

                /* 5401/5411 PHYs need opposite link-polarity settings. */
                if (masked_phy_id == TG3_PHY_ID_BCM5401)
                        mac_mode &= ~MAC_MODE_LINK_POLARITY;
                else if (masked_phy_id == TG3_PHY_ID_BCM5411)
                        mac_mode |= MAC_MODE_LINK_POLARITY;

                tg3_writephy(tp, MII_TG3_EXT_CTRL,
                             MII_TG3_EXT_CTRL_LNK3_LED_MODE);
        }

        tw32(MAC_MODE, mac_mode);
        udelay(40);

        return 0;
}
7137
7138 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7139 {
7140         struct tg3 *tp = netdev_priv(dev);
7141
7142         if (features & NETIF_F_LOOPBACK) {
7143                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7144                         return;
7145
7146                 spin_lock_bh(&tp->lock);
7147                 tg3_mac_loopback(tp, true);
7148                 netif_carrier_on(tp->dev);
7149                 spin_unlock_bh(&tp->lock);
7150                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7151         } else {
7152                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7153                         return;
7154
7155                 spin_lock_bh(&tp->lock);
7156                 tg3_mac_loopback(tp, false);
7157                 /* Force link status check */
7158                 tg3_setup_phy(tp, 1);
7159                 spin_unlock_bh(&tp->lock);
7160                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7161         }
7162 }
7163
7164 static netdev_features_t tg3_fix_features(struct net_device *dev,
7165         netdev_features_t features)
7166 {
7167         struct tg3 *tp = netdev_priv(dev);
7168
7169         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7170                 features &= ~NETIF_F_ALL_TSO;
7171
7172         return features;
7173 }
7174
7175 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7176 {
7177         netdev_features_t changed = dev->features ^ features;
7178
7179         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7180                 tg3_set_loopback(dev, features);
7181
7182         return 0;
7183 }
7184
/* Release all rx data buffers held by a producer ring set.
 *
 * For secondary ring sets (anything other than napi[0]'s) only the
 * entries between the consumer and producer indices are populated, so
 * just that window is walked, wrapping with the ring mask.  The
 * default ring set is fully populated, so every slot is freed.
 */
static void tg3_rx_prodring_free(struct tg3 *tp,
                                 struct tg3_rx_prodring_set *tpr)
{
        int i;

        if (tpr != &tp->napi[0].prodring) {
                /* Free only the occupied [cons, prod) window. */
                for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
                     i = (i + 1) & tp->rx_std_ring_mask)
                        tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
                                        tp->rx_pkt_map_sz);

                if (tg3_flag(tp, JUMBO_CAPABLE)) {
                        for (i = tpr->rx_jmb_cons_idx;
                             i != tpr->rx_jmb_prod_idx;
                             i = (i + 1) & tp->rx_jmb_ring_mask) {
                                tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
                                                TG3_RX_JMB_MAP_SZ);
                        }
                }

                return;
        }

        /* Default ring set: every standard slot may hold a buffer. */
        for (i = 0; i <= tp->rx_std_ring_mask; i++)
                tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
                                tp->rx_pkt_map_sz);

        /* 5780-class chips carry jumbo frames on the std ring (see
         * tg3_rx_prodring_alloc()), so only other JUMBO_CAPABLE parts
         * have a separate jumbo buffer ring to free.
         */
        if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
                for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
                        tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
                                        TG3_RX_JMB_MAP_SZ);
        }
}
7218
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success, or -ENOMEM if not even one rx buffer could be
 * allocated for a required ring; in that case everything allocated so
 * far is released again via tg3_rx_prodring_free().
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
                                 struct tg3_rx_prodring_set *tpr)
{
        u32 i, rx_pkt_dma_sz;

        tpr->rx_std_cons_idx = 0;
        tpr->rx_std_prod_idx = 0;
        tpr->rx_jmb_cons_idx = 0;
        tpr->rx_jmb_prod_idx = 0;

        /* Secondary ring sets only track buffer bookkeeping; they have
         * no descriptor rings of their own to populate.
         */
        if (tpr != &tp->napi[0].prodring) {
                memset(&tpr->rx_std_buffers[0], 0,
                       TG3_RX_STD_BUFF_RING_SIZE(tp));
                if (tpr->rx_jmb_buffers)
                        memset(&tpr->rx_jmb_buffers[0], 0,
                               TG3_RX_JMB_BUFF_RING_SIZE(tp));
                goto done;
        }

        /* Zero out all descriptors. */
        memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

        /* 5780-class chips carry jumbo frames on the standard ring, so
         * size its buffers for jumbo DMA when the MTU requires it.
         */
        rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
        if (tg3_flag(tp, 5780_CLASS) &&
            tp->dev->mtu > ETH_DATA_LEN)
                rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
        tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

        /* Initialize invariants of the rings, we only set this
         * stuff once.  This works because the card does not
         * write into the rx buffer posting rings.
         */
        for (i = 0; i <= tp->rx_std_ring_mask; i++) {
                struct tg3_rx_buffer_desc *rxd;

                rxd = &tpr->rx_std[i];
                rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
                rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
                rxd->opaque = (RXD_OPAQUE_RING_STD |
                               (i << RXD_OPAQUE_INDEX_SHIFT));
        }

        /* Now allocate fresh SKBs for each rx ring. */
        for (i = 0; i < tp->rx_pending; i++) {
                if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
                        netdev_warn(tp->dev,
                                    "Using a smaller RX standard ring. Only "
                                    "%d out of %d buffers were allocated "
                                    "successfully\n", i, tp->rx_pending);
                        if (i == 0)
                                goto initfail;
                        /* Shrink the ring to what we managed to fill. */
                        tp->rx_pending = i;
                        break;
                }
        }

        if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
                goto done;

        memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

        /* Descriptors below are only needed once the jumbo ring is
         * actually enabled (large MTU configured).
         */
        if (!tg3_flag(tp, JUMBO_RING_ENABLE))
                goto done;

        for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
                struct tg3_rx_buffer_desc *rxd;

                rxd = &tpr->rx_jmb[i].std;
                rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
                rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
                                  RXD_FLAG_JUMBO;
                rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
                       (i << RXD_OPAQUE_INDEX_SHIFT));
        }

        for (i = 0; i < tp->rx_jumbo_pending; i++) {
                if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
                        netdev_warn(tp->dev,
                                    "Using a smaller RX jumbo ring. Only %d "
                                    "out of %d buffers were allocated "
                                    "successfully\n", i, tp->rx_jumbo_pending);
                        if (i == 0)
                                goto initfail;
                        /* Shrink the jumbo ring to the filled portion. */
                        tp->rx_jumbo_pending = i;
                        break;
                }
        }

done:
        return 0;

initfail:
        tg3_rx_prodring_free(tp, tpr);
        return -ENOMEM;
}
7321
/* Free the buffer bookkeeping arrays and DMA descriptor rings of one
 * producer ring set.  Safe on a partially initialized set: kfree(NULL)
 * is a no-op and the coherent areas are only freed when present.  All
 * pointers are NULLed so a repeat call is harmless.
 */
static void tg3_rx_prodring_fini(struct tg3 *tp,
                                 struct tg3_rx_prodring_set *tpr)
{
        kfree(tpr->rx_std_buffers);
        tpr->rx_std_buffers = NULL;
        kfree(tpr->rx_jmb_buffers);
        tpr->rx_jmb_buffers = NULL;
        if (tpr->rx_std) {
                dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
                                  tpr->rx_std, tpr->rx_std_mapping);
                tpr->rx_std = NULL;
        }
        if (tpr->rx_jmb) {
                dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
                                  tpr->rx_jmb, tpr->rx_jmb_mapping);
                tpr->rx_jmb = NULL;
        }
}
7340
/* Allocate the buffer bookkeeping arrays and DMA descriptor rings for
 * one producer ring set.  The separate jumbo ring is created only for
 * JUMBO_CAPABLE parts that are not 5780-class (those reuse the std
 * ring for jumbo frames).  On any failure, everything allocated so far
 * is released via tg3_rx_prodring_fini() and -ENOMEM is returned.
 */
static int tg3_rx_prodring_init(struct tg3 *tp,
                                struct tg3_rx_prodring_set *tpr)
{
        tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
                                      GFP_KERNEL);
        if (!tpr->rx_std_buffers)
                return -ENOMEM;

        tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
                                         TG3_RX_STD_RING_BYTES(tp),
                                         &tpr->rx_std_mapping,
                                         GFP_KERNEL);
        if (!tpr->rx_std)
                goto err_out;

        if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
                tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
                                              GFP_KERNEL);
                if (!tpr->rx_jmb_buffers)
                        goto err_out;

                tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
                                                 TG3_RX_JMB_RING_BYTES(tp),
                                                 &tpr->rx_jmb_mapping,
                                                 GFP_KERNEL);
                if (!tpr->rx_jmb)
                        goto err_out;
        }

        return 0;

err_out:
        tg3_rx_prodring_fini(tp, tpr);
        return -ENOMEM;
}
7376
7377 /* Free up pending packets in all rx/tx rings.
7378  *
7379  * The chip has been shut down and the driver detached from
7380  * the networking, so no interrupts or new tx packets will
7381  * end up in the driver.  tp->{tx,}lock is not held and we are not
7382  * in an interrupt context and thus may sleep.
7383  */
7384 static void tg3_free_rings(struct tg3 *tp)
7385 {
7386         int i, j;
7387
7388         for (j = 0; j < tp->irq_cnt; j++) {
7389                 struct tg3_napi *tnapi = &tp->napi[j];
7390
7391                 tg3_rx_prodring_free(tp, &tnapi->prodring);
7392
7393                 if (!tnapi->tx_buffers)
7394                         continue;
7395
7396                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7397                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7398
7399                         if (!skb)
7400                                 continue;
7401
7402                         tg3_tx_skb_unmap(tnapi, i,
7403                                          skb_shinfo(skb)->nr_frags - 1);
7404
7405                         dev_kfree_skb_any(skb);
7406                 }
7407         }
7408         netdev_reset_queue(tp->dev);
7409 }
7410
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success, or -ENOMEM when an rx producer ring could not
 * be repopulated (all rings are freed again in that case).
 */
static int tg3_init_rings(struct tg3 *tp)
{
        int i;

        /* Free up all the SKBs. */
        tg3_free_rings(tp);

        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tnapi->last_tag = 0;
                tnapi->last_irq_tag = 0;
                tnapi->hw_status->status = 0;
                tnapi->hw_status->status_tag = 0;
                /* NOTE(review): the memset below also covers the two
                 * fields cleared just above; the explicit stores look
                 * redundant but are kept as-is.
                 */
                memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

                tnapi->tx_prod = 0;
                tnapi->tx_cons = 0;
                if (tnapi->tx_ring)
                        memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);

                tnapi->rx_rcb_ptr = 0;
                if (tnapi->rx_rcb)
                        memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));

                if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
                        /* Unwind everything on the first failure. */
                        tg3_free_rings(tp);
                        return -ENOMEM;
                }
        }

        return 0;
}
7451
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 *
 * Releases, per interrupt vector, the tx ring, tx bookkeeping array,
 * rx return ring, producer ring set and status block, then frees the
 * shared statistics block.  Every pointer is NULLed, so the function
 * is safe to call again (e.g. from tg3_alloc_consistent()'s error
 * path on a partially built state).
 */
static void tg3_free_consistent(struct tg3 *tp)
{
        int i;

        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                if (tnapi->tx_ring) {
                        dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
                                tnapi->tx_ring, tnapi->tx_desc_mapping);
                        tnapi->tx_ring = NULL;
                }

                kfree(tnapi->tx_buffers);
                tnapi->tx_buffers = NULL;

                if (tnapi->rx_rcb) {
                        dma_free_coherent(&tp->pdev->dev,
                                          TG3_RX_RCB_RING_BYTES(tp),
                                          tnapi->rx_rcb,
                                          tnapi->rx_rcb_mapping);
                        tnapi->rx_rcb = NULL;
                }

                tg3_rx_prodring_fini(tp, &tnapi->prodring);

                if (tnapi->hw_status) {
                        dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
                                          tnapi->hw_status,
                                          tnapi->status_mapping);
                        tnapi->hw_status = NULL;
                }
        }

        if (tp->hw_stats) {
                dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
                                  tp->hw_stats, tp->stats_mapping);
                tp->hw_stats = NULL;
        }
}
7496
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 *
 * Allocates the shared statistics block and, for every interrupt
 * vector, the status block, producer ring set, the tx ring (only on
 * vectors that handle tx) and the rx return ring (only on vectors
 * that handle rx).  Returns 0 on success or -ENOMEM after releasing
 * everything via tg3_free_consistent().
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
        int i;

        tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
                                          sizeof(struct tg3_hw_stats),
                                          &tp->stats_mapping,
                                          GFP_KERNEL);
        if (!tp->hw_stats)
                goto err_out;

        memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];
                struct tg3_hw_status *sblk;

                tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
                                                      TG3_HW_STATUS_SIZE,
                                                      &tnapi->status_mapping,
                                                      GFP_KERNEL);
                if (!tnapi->hw_status)
                        goto err_out;

                memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
                sblk = tnapi->hw_status;

                if (tg3_rx_prodring_init(tp, &tnapi->prodring))
                        goto err_out;

                /* If multivector TSS is enabled, vector 0 does not handle
                 * tx interrupts.  Don't allocate any resources for it.
                 */
                if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
                    (i && tg3_flag(tp, ENABLE_TSS))) {
                        tnapi->tx_buffers = kzalloc(
                                               sizeof(struct tg3_tx_ring_info) *
                                               TG3_TX_RING_SIZE, GFP_KERNEL);
                        if (!tnapi->tx_buffers)
                                goto err_out;

                        tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
                                                            TG3_TX_RING_BYTES,
                                                        &tnapi->tx_desc_mapping,
                                                            GFP_KERNEL);
                        if (!tnapi->tx_ring)
                                goto err_out;
                }

                /*
                 * When RSS is enabled, the status block format changes
                 * slightly.  The "rx_jumbo_consumer", "reserved",
                 * and "rx_mini_consumer" members get mapped to the
                 * other three rx return ring producer indexes.
                 */
                switch (i) {
                default:
                        tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
                        break;
                case 2:
                        tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
                        break;
                case 3:
                        tnapi->rx_rcb_prod_idx = &sblk->reserved;
                        break;
                case 4:
                        tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
                        break;
                }

                /*
                 * If multivector RSS is enabled, vector 0 does not handle
                 * rx or tx interrupts.  Don't allocate any resources for it.
                 */
                if (!i && tg3_flag(tp, ENABLE_RSS))
                        continue;

                tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
                                                   TG3_RX_RCB_RING_BYTES(tp),
                                                   &tnapi->rx_rcb_mapping,
                                                   GFP_KERNEL);
                if (!tnapi->rx_rcb)
                        goto err_out;

                memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
        }

        return 0;

err_out:
        tg3_free_consistent(tp);
        return -ENOMEM;
}
7594
7595 #define MAX_WAIT_CNT 1000
7596
/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 *
 * On 5705-plus chips several blocks cannot be individually disabled,
 * so those are reported as stopped immediately.  If the enable bit has
 * not cleared after MAX_WAIT_CNT polls (100us apart), -ENODEV is
 * returned -- unless @silent is set, in which case the timeout is
 * deliberately swallowed and 0 is returned.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
{
        unsigned int i;
        u32 val;

        if (tg3_flag(tp, 5705_PLUS)) {
                switch (ofs) {
                case RCVLSC_MODE:
                case DMAC_MODE:
                case MBFREE_MODE:
                case BUFMGR_MODE:
                case MEMARB_MODE:
                        /* We can't enable/disable these bits of the
                         * 5705/5750, just say success.
                         */
                        return 0;

                default:
                        break;
                }
        }

        /* Clear the enable bit and flush the write. */
        val = tr32(ofs);
        val &= ~enable_bit;
        tw32_f(ofs, val);

        for (i = 0; i < MAX_WAIT_CNT; i++) {
                udelay(100);
                val = tr32(ofs);
                if ((val & enable_bit) == 0)
                        break;
        }

        if (i == MAX_WAIT_CNT && !silent) {
                dev_err(&tp->pdev->dev,
                        "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
                        ofs, enable_bit);
                return -ENODEV;
        }

        return 0;
}
7642
/* tp->lock is held.
 *
 * Stop all MAC and DMA activity: the rx path blocks first, then the
 * tx path, then host coalescing, write-DMA and memory blocks, with an
 * FTQ reset in between.  Individual block failures are OR-ed into the
 * return value; @silent suppresses their log output.  Finally each
 * vector's status block is cleared.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
        int i, err;

        tg3_disable_ints(tp);

        /* Stop accepting rx traffic before tearing down the rx blocks. */
        tp->rx_mode &= ~RX_MODE_ENABLE;
        tw32_f(MAC_RX_MODE, tp->rx_mode);
        udelay(10);

        err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

        err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

        tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        /* Disable the MAC transmitter and poll until it is quiesced. */
        tp->tx_mode &= ~TX_MODE_ENABLE;
        tw32_f(MAC_TX_MODE, tp->tx_mode);

        for (i = 0; i < MAX_WAIT_CNT; i++) {
                udelay(100);
                if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
                        break;
        }
        if (i >= MAX_WAIT_CNT) {
                dev_err(&tp->pdev->dev,
                        "%s timed out, TX_MODE_ENABLE will not clear "
                        "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
                err |= -ENODEV;
        }

        err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

        /* Pulse the flow-through queue reset. */
        tw32(FTQ_RESET, 0xffffffff);
        tw32(FTQ_RESET, 0x00000000);

        err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
        err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

        /* Wipe the status blocks so stale indices are not consumed. */
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];
                if (tnapi->hw_status)
                        memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
        }

        return err;
}
7706
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
        /* Only PCI_COMMAND needs saving here; tg3_restore_pci_state()
         * rebuilds the rest from values cached in struct tg3.
         */
        pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
7712
/* Restore PCI state after chip reset.
 *
 * Rewrites the config-space settings the core-clock reset clobbers:
 * the indirect-access enables, TG3PCI_PCISTATE (including APE access
 * bits), the command register saved by tg3_save_pci_state(), cache
 * line size and latency timer on non-PCIe parts, the PCI-X relaxed
 * ordering bit, and the MSI enable bit on 5780-class chips.
 */
static void tg3_restore_pci_state(struct tg3 *tp)
{
        u32 val;

        /* Re-enable indirect register accesses. */
        pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
                               tp->misc_host_ctrl);

        /* Set MAX PCI retry to zero. */
        val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
        if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
            tg3_flag(tp, PCIX_MODE))
                val |= PCISTATE_RETRY_SAME_DMA;
        /* Allow reads and writes to the APE register and memory space. */
        if (tg3_flag(tp, ENABLE_APE))
                val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
                       PCISTATE_ALLOW_APE_SHMEM_WR |
                       PCISTATE_ALLOW_APE_PSPACE_WR;
        pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

        pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

        if (!tg3_flag(tp, PCI_EXPRESS)) {
                pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
                                      tp->pci_cacheline_sz);
                pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
                                      tp->pci_lat_timer);
        }

        /* Make sure PCI-X relaxed ordering bit is clear. */
        if (tg3_flag(tp, PCIX_MODE)) {
                u16 pcix_cmd;

                pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
                                     &pcix_cmd);
                pcix_cmd &= ~PCI_X_CMD_ERO;
                pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
                                      pcix_cmd);
        }

        if (tg3_flag(tp, 5780_CLASS)) {

                /* Chip reset on 5780 will reset MSI enable bit,
                 * so need to restore it.
                 */
                if (tg3_flag(tp, USING_MSI)) {
                        u16 ctrl;

                        pci_read_config_word(tp->pdev,
                                             tp->msi_cap + PCI_MSI_FLAGS,
                                             &ctrl);
                        pci_write_config_word(tp->pdev,
                                              tp->msi_cap + PCI_MSI_FLAGS,
                                              ctrl | PCI_MSI_FLAGS_ENABLE);
                        val = tr32(MSGINT_MODE);
                        tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
                }
        }
}
7773
7774 /* tp->lock is held. */
7775 static int tg3_chip_reset(struct tg3 *tp)
7776 {
7777         u32 val;
7778         void (*write_op)(struct tg3 *, u32, u32);
7779         int i, err;
7780
7781         tg3_nvram_lock(tp);
7782
7783         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7784
7785         /* No matching tg3_nvram_unlock() after this because
7786          * chip reset below will undo the nvram lock.
7787          */
7788         tp->nvram_lock_cnt = 0;
7789
7790         /* GRC_MISC_CFG core clock reset will clear the memory
7791          * enable bit in PCI register 4 and the MSI enable bit
7792          * on some chips, so we save relevant registers here.
7793          */
7794         tg3_save_pci_state(tp);
7795
7796         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7797             tg3_flag(tp, 5755_PLUS))
7798                 tw32(GRC_FASTBOOT_PC, 0);
7799
7800         /*
7801          * We must avoid the readl() that normally takes place.
7802          * It locks machines, causes machine checks, and other
7803          * fun things.  So, temporarily disable the 5701
7804          * hardware workaround, while we do the reset.
7805          */
7806         write_op = tp->write32;
7807         if (write_op == tg3_write_flush_reg32)
7808                 tp->write32 = tg3_write32;
7809
7810         /* Prevent the irq handler from reading or writing PCI registers
7811          * during chip reset when the memory enable bit in the PCI command
7812          * register may be cleared.  The chip does not generate interrupt
7813          * at this time, but the irq handler may still be called due to irq
7814          * sharing or irqpoll.
7815          */
7816         tg3_flag_set(tp, CHIP_RESETTING);
7817         for (i = 0; i < tp->irq_cnt; i++) {
7818                 struct tg3_napi *tnapi = &tp->napi[i];
7819                 if (tnapi->hw_status) {
7820                         tnapi->hw_status->status = 0;
7821                         tnapi->hw_status->status_tag = 0;
7822                 }
7823                 tnapi->last_tag = 0;
7824                 tnapi->last_irq_tag = 0;
7825         }
7826         smp_mb();
7827
7828         for (i = 0; i < tp->irq_cnt; i++)
7829                 synchronize_irq(tp->napi[i].irq_vec);
7830
7831         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7832                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7833                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7834         }
7835
7836         /* do the reset */
7837         val = GRC_MISC_CFG_CORECLK_RESET;
7838
7839         if (tg3_flag(tp, PCI_EXPRESS)) {
7840                 /* Force PCIe 1.0a mode */
7841                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7842                     !tg3_flag(tp, 57765_PLUS) &&
7843                     tr32(TG3_PCIE_PHY_TSTCTL) ==
7844                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7845                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7846
7847                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7848                         tw32(GRC_MISC_CFG, (1 << 29));
7849                         val |= (1 << 29);
7850                 }
7851         }
7852
7853         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7854                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7855                 tw32(GRC_VCPU_EXT_CTRL,
7856                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7857         }
7858
7859         /* Manage gphy power for all CPMU absent PCIe devices. */
7860         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7861                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7862
7863         tw32(GRC_MISC_CFG, val);
7864
7865         /* restore 5701 hardware bug workaround write method */
7866         tp->write32 = write_op;
7867
7868         /* Unfortunately, we have to delay before the PCI read back.
7869          * Some 575X chips even will not respond to a PCI cfg access
7870          * when the reset command is given to the chip.
7871          *
7872          * How do these hardware designers expect things to work
7873          * properly if the PCI write is posted for a long period
7874          * of time?  It is always necessary to have some method by
7875          * which a register read back can occur to push the write
7876          * out which does the reset.
7877          *
7878          * For most tg3 variants the trick below was working.
7879          * Ho hum...
7880          */
7881         udelay(120);
7882
7883         /* Flush PCI posted writes.  The normal MMIO registers
7884          * are inaccessible at this time so this is the only
7885          * way to make this reliably (actually, this is no longer
7886          * the case, see above).  I tried to use indirect
7887          * register read/write but this upset some 5701 variants.
7888          */
7889         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7890
7891         udelay(120);
7892
7893         if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7894                 u16 val16;
7895
7896                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7897                         int i;
7898                         u32 cfg_val;
7899
7900                         /* Wait for link training to complete.  */
7901                         for (i = 0; i < 5000; i++)
7902                                 udelay(100);
7903
7904                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7905                         pci_write_config_dword(tp->pdev, 0xc4,
7906                                                cfg_val | (1 << 15));
7907                 }
7908
7909                 /* Clear the "no snoop" and "relaxed ordering" bits. */
7910                 pci_read_config_word(tp->pdev,
7911                                      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7912                                      &val16);
7913                 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7914                            PCI_EXP_DEVCTL_NOSNOOP_EN);
7915                 /*
7916                  * Older PCIe devices only support the 128 byte
7917                  * MPS setting.  Enforce the restriction.
7918                  */
7919                 if (!tg3_flag(tp, CPMU_PRESENT))
7920                         val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7921                 pci_write_config_word(tp->pdev,
7922                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7923                                       val16);
7924
7925                 /* Clear error status */
7926                 pci_write_config_word(tp->pdev,
7927                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7928                                       PCI_EXP_DEVSTA_CED |
7929                                       PCI_EXP_DEVSTA_NFED |
7930                                       PCI_EXP_DEVSTA_FED |
7931                                       PCI_EXP_DEVSTA_URD);
7932         }
7933
7934         tg3_restore_pci_state(tp);
7935
7936         tg3_flag_clear(tp, CHIP_RESETTING);
7937         tg3_flag_clear(tp, ERROR_PROCESSED);
7938
7939         val = 0;
7940         if (tg3_flag(tp, 5780_CLASS))
7941                 val = tr32(MEMARB_MODE);
7942         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7943
7944         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7945                 tg3_stop_fw(tp);
7946                 tw32(0x5000, 0x400);
7947         }
7948
7949         tw32(GRC_MODE, tp->grc_mode);
7950
7951         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7952                 val = tr32(0xc4);
7953
7954                 tw32(0xc4, val | (1 << 15));
7955         }
7956
7957         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7958             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7959                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7960                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7961                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7962                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7963         }
7964
7965         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7966                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7967                 val = tp->mac_mode;
7968         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7969                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7970                 val = tp->mac_mode;
7971         } else
7972                 val = 0;
7973
7974         tw32_f(MAC_MODE, val);
7975         udelay(40);
7976
7977         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7978
7979         err = tg3_poll_fw(tp);
7980         if (err)
7981                 return err;
7982
7983         tg3_mdio_start(tp);
7984
7985         if (tg3_flag(tp, PCI_EXPRESS) &&
7986             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7987             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7988             !tg3_flag(tp, 57765_PLUS)) {
7989                 val = tr32(0x7c00);
7990
7991                 tw32(0x7c00, val | (1 << 25));
7992         }
7993
7994         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7995                 val = tr32(TG3_CPMU_CLCK_ORIDE);
7996                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7997         }
7998
7999         /* Reprobe ASF enable state.  */
8000         tg3_flag_clear(tp, ENABLE_ASF);
8001         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8002         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8003         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8004                 u32 nic_cfg;
8005
8006                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8007                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8008                         tg3_flag_set(tp, ENABLE_ASF);
8009                         tp->last_event_jiffies = jiffies;
8010                         if (tg3_flag(tp, 5750_PLUS))
8011                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8012                 }
8013         }
8014
8015         return 0;
8016 }
8017
8018 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
8019                                                  struct rtnl_link_stats64 *);
8020 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *,
8021                                                 struct tg3_ethtool_stats *);
8022
8023 /* tp->lock is held. */
8024 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8025 {
8026         int err;
8027
8028         tg3_stop_fw(tp);
8029
8030         tg3_write_sig_pre_reset(tp, kind);
8031
8032         tg3_abort_hw(tp, silent);
8033         err = tg3_chip_reset(tp);
8034
8035         __tg3_set_mac_addr(tp, 0);
8036
8037         tg3_write_sig_legacy(tp, kind);
8038         tg3_write_sig_post_reset(tp, kind);
8039
8040         if (tp->hw_stats) {
8041                 /* Save the stats across chip resets... */
8042                 tg3_get_stats64(tp->dev, &tp->net_stats_prev),
8043                 tg3_get_estats(tp, &tp->estats_prev);
8044
8045                 /* And make sure the next sample is new data */
8046                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8047         }
8048
8049         if (err)
8050                 return err;
8051
8052         return 0;
8053 }
8054
8055 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8056 {
8057         struct tg3 *tp = netdev_priv(dev);
8058         struct sockaddr *addr = p;
8059         int err = 0, skip_mac_1 = 0;
8060
8061         if (!is_valid_ether_addr(addr->sa_data))
8062                 return -EINVAL;
8063
8064         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8065
8066         if (!netif_running(dev))
8067                 return 0;
8068
8069         if (tg3_flag(tp, ENABLE_ASF)) {
8070                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8071
8072                 addr0_high = tr32(MAC_ADDR_0_HIGH);
8073                 addr0_low = tr32(MAC_ADDR_0_LOW);
8074                 addr1_high = tr32(MAC_ADDR_1_HIGH);
8075                 addr1_low = tr32(MAC_ADDR_1_LOW);
8076
8077                 /* Skip MAC addr 1 if ASF is using it. */
8078                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8079                     !(addr1_high == 0 && addr1_low == 0))
8080                         skip_mac_1 = 1;
8081         }
8082         spin_lock_bh(&tp->lock);
8083         __tg3_set_mac_addr(tp, skip_mac_1);
8084         spin_unlock_bh(&tp->lock);
8085
8086         return err;
8087 }
8088
8089 /* tp->lock is held. */
8090 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8091                            dma_addr_t mapping, u32 maxlen_flags,
8092                            u32 nic_addr)
8093 {
8094         tg3_write_mem(tp,
8095                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8096                       ((u64) mapping >> 32));
8097         tg3_write_mem(tp,
8098                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8099                       ((u64) mapping & 0xffffffff));
8100         tg3_write_mem(tp,
8101                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8102                        maxlen_flags);
8103
8104         if (!tg3_flag(tp, 5705_PLUS))
8105                 tg3_write_mem(tp,
8106                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8107                               nic_addr);
8108 }
8109
/* Program the host coalescing engine from the ethtool parameters in @ec.
 * Callers hold tp->lock.  The exact register write order is preserved
 * as the hardware is configured live.
 *
 * When TSS (tx-side multiqueue) is enabled, vector 0 handles no tx
 * work, so its tx coalescing registers are zeroed; similarly for RSS
 * and the rx registers.  Per-vector registers for MSI-X vectors 1..n
 * live at a stride of 0x18 from the *_VEC1 bases.
 */
static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i;

	/* Default (vector 0) tx coalescing: only used when TSS is off. */
	if (!tg3_flag(tp, ENABLE_TSS)) {
		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_TXCOL_TICKS, 0);
		tw32(HOSTCC_TXMAX_FRAMES, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
	}

	/* Default (vector 0) rx coalescing: only used when RSS is off. */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_RXCOL_TICKS, 0);
		tw32(HOSTCC_RXMAX_FRAMES, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
	}

	/* Pre-5705 chips additionally support per-IRQ tick timers and a
	 * statistics block DMA timer.
	 */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 val = ec->stats_block_coalesce_usecs;

		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);

		/* No link: suppress periodic stats DMA entirely. */
		if (!netif_carrier_ok(tp->dev))
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}

	/* Active MSI-X vectors (beyond vector 0) get the requested
	 * parameters; register blocks are 0x18 bytes apart.
	 */
	for (i = 0; i < tp->irq_cnt - 1; i++) {
		u32 reg;

		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->rx_coalesce_usecs);
		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames);
		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames_irq);

		if (tg3_flag(tp, ENABLE_TSS)) {
			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
			tw32(reg, ec->tx_coalesce_usecs);
			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames);
			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames_irq);
		}
	}

	/* Any remaining (unused) vectors up to the hardware maximum are
	 * explicitly disabled.
	 */
	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);

		if (tg3_flag(tp, ENABLE_TSS)) {
			tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
			tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
			tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
		}
	}
}
8178
/* tp->lock is held.
 *
 * Reset all hardware rings after a chip reset: disable every tx and
 * rx-return ring control block except the first, zero the mailbox
 * registers, clear the host status blocks, then reprogram the ring
 * control blocks and status block addresses for each active vector.
 * The write ordering below matters to the hardware; do not reorder.
 */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk, txrcb, rxrcb, limit;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* Disable all transmit rings but the first.
	 * The number of SEND ring control blocks in NIC SRAM depends on
	 * the chip family.
	 */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);


	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable interrupts (writing 1 to the interrupt mailbox masks
	 * the vector) and reset the per-vector bookkeeping.
	 */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			/* tx producer mailboxes exist per-vector only
			 * with TSS enabled.
			 */
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		/* Without TSS, vector 0 owns the single tx mailbox. */
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	txrcb = NIC_SRAM_SEND_RCB;
	rxrcb = NIC_SRAM_RCV_RET_RCB;

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	/* Reprogram the ring control blocks for vector 0. */
	if (tnapi->tx_ring) {
		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE <<
				BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
		txrcb += TG3_BDINFO_SIZE;
	}

	if (tnapi->rx_rcb) {
		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
		rxrcb += TG3_BDINFO_SIZE;
	}

	/* Remaining vectors: status block address registers start at
	 * HOSTCC_STATBLCK_RING1 and are 8 bytes apart.
	 */
	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		if (tnapi->tx_ring) {
			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
				       (TG3_TX_RING_SIZE <<
					BDINFO_FLAGS_MAXLEN_SHIFT),
				       NIC_SRAM_TX_BUFFER_DESC);
			txrcb += TG3_BDINFO_SIZE;
		}

		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       ((tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT), 0);

		stblk += 8;
		rxrcb += TG3_BDINFO_SIZE;
	}
}
8306
8307 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8308 {
8309         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8310
8311         if (!tg3_flag(tp, 5750_PLUS) ||
8312             tg3_flag(tp, 5780_CLASS) ||
8313             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8314             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8315             tg3_flag(tp, 57765_PLUS))
8316                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8317         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8318                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8319                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8320         else
8321                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8322
8323         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8324         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8325
8326         val = min(nic_rep_thresh, host_rep_thresh);
8327         tw32(RCVBDI_STD_THRESH, val);
8328
8329         if (tg3_flag(tp, 57765_PLUS))
8330                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8331
8332         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8333                 return;
8334
8335         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8336
8337         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8338
8339         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8340         tw32(RCVBDI_JUMBO_THRESH, val);
8341
8342         if (tg3_flag(tp, 57765_PLUS))
8343                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8344 }
8345
8346 static inline u32 calc_crc(unsigned char *buf, int len)
8347 {
8348         u32 reg;
8349         u32 tmp;
8350         int j, k;
8351
8352         reg = 0xffffffff;
8353
8354         for (j = 0; j < len; j++) {
8355                 reg ^= buf[j];
8356
8357                 for (k = 0; k < 8; k++) {
8358                         tmp = reg & 0x01;
8359
8360                         reg >>= 1;
8361
8362                         if (tmp)
8363                                 reg ^= 0xedb88320;
8364                 }
8365         }
8366
8367         return ~reg;
8368 }
8369
8370 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8371 {
8372         /* accept or reject all multicast frames */
8373         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8374         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8375         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8376         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8377 }
8378
8379 static void __tg3_set_rx_mode(struct net_device *dev)
8380 {
8381         struct tg3 *tp = netdev_priv(dev);
8382         u32 rx_mode;
8383
8384         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8385                                   RX_MODE_KEEP_VLAN_TAG);
8386
8387 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8388         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8389          * flag clear.
8390          */
8391         if (!tg3_flag(tp, ENABLE_ASF))
8392                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8393 #endif
8394
8395         if (dev->flags & IFF_PROMISC) {
8396                 /* Promiscuous mode. */
8397                 rx_mode |= RX_MODE_PROMISC;
8398         } else if (dev->flags & IFF_ALLMULTI) {
8399                 /* Accept all multicast. */
8400                 tg3_set_multi(tp, 1);
8401         } else if (netdev_mc_empty(dev)) {
8402                 /* Reject all multicast. */
8403                 tg3_set_multi(tp, 0);
8404         } else {
8405                 /* Accept one or more multicast(s). */
8406                 struct netdev_hw_addr *ha;
8407                 u32 mc_filter[4] = { 0, };
8408                 u32 regidx;
8409                 u32 bit;
8410                 u32 crc;
8411
8412                 netdev_for_each_mc_addr(ha, dev) {
8413                         crc = calc_crc(ha->addr, ETH_ALEN);
8414                         bit = ~crc & 0x7f;
8415                         regidx = (bit & 0x60) >> 5;
8416                         bit &= 0x1f;
8417                         mc_filter[regidx] |= (1 << bit);
8418                 }
8419
8420                 tw32(MAC_HASH_REG_0, mc_filter[0]);
8421                 tw32(MAC_HASH_REG_1, mc_filter[1]);
8422                 tw32(MAC_HASH_REG_2, mc_filter[2]);
8423                 tw32(MAC_HASH_REG_3, mc_filter[3]);
8424         }
8425
8426         if (rx_mode != tp->rx_mode) {
8427                 tp->rx_mode = rx_mode;
8428                 tw32_f(MAC_RX_MODE, rx_mode);
8429                 udelay(10);
8430         }
8431 }
8432
8433 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
8434 {
8435         int i;
8436
8437         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8438                 tp->rss_ind_tbl[i] =
8439                         ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
8440 }
8441
8442 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8443 {
8444         int i;
8445
8446         if (!tg3_flag(tp, SUPPORT_MSIX))
8447                 return;
8448
8449         if (tp->irq_cnt <= 2) {
8450                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8451                 return;
8452         }
8453
8454         /* Validate table against current IRQ count */
8455         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8456                 if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
8457                         break;
8458         }
8459
8460         if (i != TG3_RSS_INDIR_TBL_SIZE)
8461                 tg3_rss_init_dflt_indir_tbl(tp);
8462 }
8463
8464 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8465 {
8466         int i = 0;
8467         u32 reg = MAC_RSS_INDIR_TBL_0;
8468
8469         while (i < TG3_RSS_INDIR_TBL_SIZE) {
8470                 u32 val = tp->rss_ind_tbl[i];
8471                 i++;
8472                 for (; i % 8; i++) {
8473                         val <<= 4;
8474                         val |= tp->rss_ind_tbl[i];
8475                 }
8476                 tw32(reg, val);
8477                 reg += 4;
8478         }
8479 }
8480
8481 /* tp->lock is held. */
8482 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8483 {
8484         u32 val, rdmac_mode;
8485         int i, err, limit;
8486         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8487
8488         tg3_disable_ints(tp);
8489
8490         tg3_stop_fw(tp);
8491
8492         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8493
8494         if (tg3_flag(tp, INIT_COMPLETE))
8495                 tg3_abort_hw(tp, 1);
8496
8497         /* Enable MAC control of LPI */
8498         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8499                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8500                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8501                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
8502
8503                 tw32_f(TG3_CPMU_EEE_CTRL,
8504                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8505
8506                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8507                       TG3_CPMU_EEEMD_LPI_IN_TX |
8508                       TG3_CPMU_EEEMD_LPI_IN_RX |
8509                       TG3_CPMU_EEEMD_EEE_ENABLE;
8510
8511                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8512                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8513
8514                 if (tg3_flag(tp, ENABLE_APE))
8515                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8516
8517                 tw32_f(TG3_CPMU_EEE_MODE, val);
8518
8519                 tw32_f(TG3_CPMU_EEE_DBTMR1,
8520                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8521                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8522
8523                 tw32_f(TG3_CPMU_EEE_DBTMR2,
8524                        TG3_CPMU_DBTMR2_APE_TX_2047US |
8525                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8526         }
8527
8528         if (reset_phy)
8529                 tg3_phy_reset(tp);
8530
8531         err = tg3_chip_reset(tp);
8532         if (err)
8533                 return err;
8534
8535         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8536
8537         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8538                 val = tr32(TG3_CPMU_CTRL);
8539                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8540                 tw32(TG3_CPMU_CTRL, val);
8541
8542                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8543                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8544                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8545                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8546
8547                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8548                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8549                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8550                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8551
8552                 val = tr32(TG3_CPMU_HST_ACC);
8553                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8554                 val |= CPMU_HST_ACC_MACCLK_6_25;
8555                 tw32(TG3_CPMU_HST_ACC, val);
8556         }
8557
8558         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8559                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8560                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8561                        PCIE_PWR_MGMT_L1_THRESH_4MS;
8562                 tw32(PCIE_PWR_MGMT_THRESH, val);
8563
8564                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8565                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8566
8567                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8568
8569                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8570                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8571         }
8572
8573         if (tg3_flag(tp, L1PLLPD_EN)) {
8574                 u32 grc_mode = tr32(GRC_MODE);
8575
8576                 /* Access the lower 1K of PL PCIE block registers. */
8577                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8578                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8579
8580                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8581                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8582                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8583
8584                 tw32(GRC_MODE, grc_mode);
8585         }
8586
8587         if (tg3_flag(tp, 57765_CLASS)) {
8588                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8589                         u32 grc_mode = tr32(GRC_MODE);
8590
8591                         /* Access the lower 1K of PL PCIE block registers. */
8592                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8593                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8594
8595                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8596                                    TG3_PCIE_PL_LO_PHYCTL5);
8597                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8598                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8599
8600                         tw32(GRC_MODE, grc_mode);
8601                 }
8602
8603                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8604                         u32 grc_mode = tr32(GRC_MODE);
8605
8606                         /* Access the lower 1K of DL PCIE block registers. */
8607                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8608                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8609
8610                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8611                                    TG3_PCIE_DL_LO_FTSMAX);
8612                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8613                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8614                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8615
8616                         tw32(GRC_MODE, grc_mode);
8617                 }
8618
8619                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8620                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8621                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8622                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8623         }
8624
8625         /* This works around an issue with Athlon chipsets on
8626          * B3 tigon3 silicon.  This bit has no effect on any
8627          * other revision.  But do not set this on PCI Express
8628          * chips and don't even touch the clocks if the CPMU is present.
8629          */
8630         if (!tg3_flag(tp, CPMU_PRESENT)) {
8631                 if (!tg3_flag(tp, PCI_EXPRESS))
8632                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8633                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8634         }
8635
8636         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8637             tg3_flag(tp, PCIX_MODE)) {
8638                 val = tr32(TG3PCI_PCISTATE);
8639                 val |= PCISTATE_RETRY_SAME_DMA;
8640                 tw32(TG3PCI_PCISTATE, val);
8641         }
8642
8643         if (tg3_flag(tp, ENABLE_APE)) {
8644                 /* Allow reads and writes to the
8645                  * APE register and memory space.
8646                  */
8647                 val = tr32(TG3PCI_PCISTATE);
8648                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8649                        PCISTATE_ALLOW_APE_SHMEM_WR |
8650                        PCISTATE_ALLOW_APE_PSPACE_WR;
8651                 tw32(TG3PCI_PCISTATE, val);
8652         }
8653
8654         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8655                 /* Enable some hw fixes.  */
8656                 val = tr32(TG3PCI_MSI_DATA);
8657                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8658                 tw32(TG3PCI_MSI_DATA, val);
8659         }
8660
8661         /* Descriptor ring init may make accesses to the
8662          * NIC SRAM area to setup the TX descriptors, so we
8663          * can only do this after the hardware has been
8664          * successfully reset.
8665          */
8666         err = tg3_init_rings(tp);
8667         if (err)
8668                 return err;
8669
8670         if (tg3_flag(tp, 57765_PLUS)) {
8671                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8672                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8673                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8674                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8675                 if (!tg3_flag(tp, 57765_CLASS) &&
8676                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8677                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8678                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8679         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8680                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8681                 /* This value is determined during the probe time DMA
8682                  * engine test, tg3_test_dma.
8683                  */
8684                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8685         }
8686
8687         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8688                           GRC_MODE_4X_NIC_SEND_RINGS |
8689                           GRC_MODE_NO_TX_PHDR_CSUM |
8690                           GRC_MODE_NO_RX_PHDR_CSUM);
8691         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8692
8693         /* Pseudo-header checksum is done by hardware logic and not
8694          * the offload processers, so make the chip do the pseudo-
8695          * header checksums on receive.  For transmit it is more
8696          * convenient to do the pseudo-header checksum in software
8697          * as Linux does that on transmit for us in all cases.
8698          */
8699         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8700
8701         tw32(GRC_MODE,
8702              tp->grc_mode |
8703              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8704
8705         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
8706         val = tr32(GRC_MISC_CFG);
8707         val &= ~0xff;
8708         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8709         tw32(GRC_MISC_CFG, val);
8710
8711         /* Initialize MBUF/DESC pool. */
8712         if (tg3_flag(tp, 5750_PLUS)) {
8713                 /* Do nothing.  */
8714         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8715                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8716                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8717                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8718                 else
8719                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8720                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8721                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8722         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8723                 int fw_len;
8724
8725                 fw_len = tp->fw_len;
8726                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8727                 tw32(BUFMGR_MB_POOL_ADDR,
8728                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8729                 tw32(BUFMGR_MB_POOL_SIZE,
8730                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8731         }
8732
8733         if (tp->dev->mtu <= ETH_DATA_LEN) {
8734                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8735                      tp->bufmgr_config.mbuf_read_dma_low_water);
8736                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8737                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8738                 tw32(BUFMGR_MB_HIGH_WATER,
8739                      tp->bufmgr_config.mbuf_high_water);
8740         } else {
8741                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8742                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8743                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8744                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8745                 tw32(BUFMGR_MB_HIGH_WATER,
8746                      tp->bufmgr_config.mbuf_high_water_jumbo);
8747         }
8748         tw32(BUFMGR_DMA_LOW_WATER,
8749              tp->bufmgr_config.dma_low_water);
8750         tw32(BUFMGR_DMA_HIGH_WATER,
8751              tp->bufmgr_config.dma_high_water);
8752
8753         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8754         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8755                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8756         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8757             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8758             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8759                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8760         tw32(BUFMGR_MODE, val);
8761         for (i = 0; i < 2000; i++) {
8762                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8763                         break;
8764                 udelay(10);
8765         }
8766         if (i >= 2000) {
8767                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8768                 return -ENODEV;
8769         }
8770
8771         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8772                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8773
8774         tg3_setup_rxbd_thresholds(tp);
8775
8776         /* Initialize TG3_BDINFO's at:
8777          *  RCVDBDI_STD_BD:     standard eth size rx ring
8778          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8779          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8780          *
8781          * like so:
8782          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8783          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8784          *                              ring attribute flags
8785          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
8786          *
8787          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8788          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8789          *
8790          * The size of each ring is fixed in the firmware, but the location is
8791          * configurable.
8792          */
8793         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8794              ((u64) tpr->rx_std_mapping >> 32));
8795         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8796              ((u64) tpr->rx_std_mapping & 0xffffffff));
8797         if (!tg3_flag(tp, 5717_PLUS))
8798                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8799                      NIC_SRAM_RX_BUFFER_DESC);
8800
8801         /* Disable the mini ring */
8802         if (!tg3_flag(tp, 5705_PLUS))
8803                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8804                      BDINFO_FLAGS_DISABLED);
8805
8806         /* Program the jumbo buffer descriptor ring control
8807          * blocks on those devices that have them.
8808          */
8809         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8810             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8811
8812                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8813                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8814                              ((u64) tpr->rx_jmb_mapping >> 32));
8815                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8816                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8817                         val = TG3_RX_JMB_RING_SIZE(tp) <<
8818                               BDINFO_FLAGS_MAXLEN_SHIFT;
8819                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8820                              val | BDINFO_FLAGS_USE_EXT_RECV);
8821                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8822                             tg3_flag(tp, 57765_CLASS))
8823                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8824                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8825                 } else {
8826                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8827                              BDINFO_FLAGS_DISABLED);
8828                 }
8829
8830                 if (tg3_flag(tp, 57765_PLUS)) {
8831                         val = TG3_RX_STD_RING_SIZE(tp);
8832                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8833                         val |= (TG3_RX_STD_DMA_SZ << 2);
8834                 } else
8835                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8836         } else
8837                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8838
8839         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8840
8841         tpr->rx_std_prod_idx = tp->rx_pending;
8842         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8843
8844         tpr->rx_jmb_prod_idx =
8845                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8846         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8847
8848         tg3_rings_reset(tp);
8849
8850         /* Initialize MAC address and backoff seed. */
8851         __tg3_set_mac_addr(tp, 0);
8852
8853         /* MTU + ethernet header + FCS + optional VLAN tag */
8854         tw32(MAC_RX_MTU_SIZE,
8855              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8856
8857         /* The slot time is changed by tg3_setup_phy if we
8858          * run at gigabit with half duplex.
8859          */
8860         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8861               (6 << TX_LENGTHS_IPG_SHIFT) |
8862               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8863
8864         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8865                 val |= tr32(MAC_TX_LENGTHS) &
8866                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
8867                         TX_LENGTHS_CNT_DWN_VAL_MSK);
8868
8869         tw32(MAC_TX_LENGTHS, val);
8870
8871         /* Receive rules. */
8872         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8873         tw32(RCVLPC_CONFIG, 0x0181);
8874
8875         /* Calculate RDMAC_MODE setting early, we need it to determine
8876          * the RCVLPC_STATE_ENABLE mask.
8877          */
8878         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8879                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8880                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8881                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8882                       RDMAC_MODE_LNGREAD_ENAB);
8883
8884         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8885                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8886
8887         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8888             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8889             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8890                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8891                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8892                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8893
8894         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8895             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8896                 if (tg3_flag(tp, TSO_CAPABLE) &&
8897                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8898                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8899                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8900                            !tg3_flag(tp, IS_5788)) {
8901                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8902                 }
8903         }
8904
8905         if (tg3_flag(tp, PCI_EXPRESS))
8906                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8907
8908         if (tg3_flag(tp, HW_TSO_1) ||
8909             tg3_flag(tp, HW_TSO_2) ||
8910             tg3_flag(tp, HW_TSO_3))
8911                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8912
8913         if (tg3_flag(tp, 57765_PLUS) ||
8914             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8915             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8916                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8917
8918         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8919                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8920
8921         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8922             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8923             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8924             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8925             tg3_flag(tp, 57765_PLUS)) {
8926                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8927                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8928                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8929                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8930                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8931                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8932                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8933                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8934                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8935                 }
8936                 tw32(TG3_RDMA_RSRVCTRL_REG,
8937                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8938         }
8939
8940         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8941             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8942                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8943                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8944                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8945                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8946         }
8947
8948         /* Receive/send statistics. */
8949         if (tg3_flag(tp, 5750_PLUS)) {
8950                 val = tr32(RCVLPC_STATS_ENABLE);
8951                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8952                 tw32(RCVLPC_STATS_ENABLE, val);
8953         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8954                    tg3_flag(tp, TSO_CAPABLE)) {
8955                 val = tr32(RCVLPC_STATS_ENABLE);
8956                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8957                 tw32(RCVLPC_STATS_ENABLE, val);
8958         } else {
8959                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8960         }
8961         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8962         tw32(SNDDATAI_STATSENAB, 0xffffff);
8963         tw32(SNDDATAI_STATSCTRL,
8964              (SNDDATAI_SCTRL_ENABLE |
8965               SNDDATAI_SCTRL_FASTUPD));
8966
8967         /* Setup host coalescing engine. */
8968         tw32(HOSTCC_MODE, 0);
8969         for (i = 0; i < 2000; i++) {
8970                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8971                         break;
8972                 udelay(10);
8973         }
8974
8975         __tg3_set_coalesce(tp, &tp->coal);
8976
8977         if (!tg3_flag(tp, 5705_PLUS)) {
8978                 /* Status/statistics block address.  See tg3_timer,
8979                  * the tg3_periodic_fetch_stats call there, and
8980                  * tg3_get_stats to see how this works for 5705/5750 chips.
8981                  */
8982                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8983                      ((u64) tp->stats_mapping >> 32));
8984                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8985                      ((u64) tp->stats_mapping & 0xffffffff));
8986                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8987
8988                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8989
8990                 /* Clear statistics and status block memory areas */
8991                 for (i = NIC_SRAM_STATS_BLK;
8992                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8993                      i += sizeof(u32)) {
8994                         tg3_write_mem(tp, i, 0);
8995                         udelay(40);
8996                 }
8997         }
8998
8999         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9000
9001         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9002         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9003         if (!tg3_flag(tp, 5705_PLUS))
9004                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9005
9006         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9007                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9008                 /* reset to prevent losing 1st rx packet intermittently */
9009                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9010                 udelay(10);
9011         }
9012
9013         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9014                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9015                         MAC_MODE_FHDE_ENABLE;
9016         if (tg3_flag(tp, ENABLE_APE))
9017                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9018         if (!tg3_flag(tp, 5705_PLUS) &&
9019             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9020             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9021                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9022         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9023         udelay(40);
9024
9025         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9026          * If TG3_FLAG_IS_NIC is zero, we should read the
9027          * register to preserve the GPIO settings for LOMs. The GPIOs,
9028          * whether used as inputs or outputs, are set by boot code after
9029          * reset.
9030          */
9031         if (!tg3_flag(tp, IS_NIC)) {
9032                 u32 gpio_mask;
9033
9034                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9035                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9036                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9037
9038                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9039                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9040                                      GRC_LCLCTRL_GPIO_OUTPUT3;
9041
9042                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9043                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9044
9045                 tp->grc_local_ctrl &= ~gpio_mask;
9046                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9047
9048                 /* GPIO1 must be driven high for eeprom write protect */
9049                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9050                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9051                                                GRC_LCLCTRL_GPIO_OUTPUT1);
9052         }
9053         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9054         udelay(100);
9055
9056         if (tg3_flag(tp, USING_MSIX)) {
9057                 val = tr32(MSGINT_MODE);
9058                 val |= MSGINT_MODE_ENABLE;
9059                 if (tp->irq_cnt > 1)
9060                         val |= MSGINT_MODE_MULTIVEC_EN;
9061                 if (!tg3_flag(tp, 1SHOT_MSI))
9062                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9063                 tw32(MSGINT_MODE, val);
9064         }
9065
9066         if (!tg3_flag(tp, 5705_PLUS)) {
9067                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9068                 udelay(40);
9069         }
9070
9071         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9072                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9073                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9074                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9075                WDMAC_MODE_LNGREAD_ENAB);
9076
9077         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9078             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9079                 if (tg3_flag(tp, TSO_CAPABLE) &&
9080                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9081                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9082                         /* nothing */
9083                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9084                            !tg3_flag(tp, IS_5788)) {
9085                         val |= WDMAC_MODE_RX_ACCEL;
9086                 }
9087         }
9088
9089         /* Enable host coalescing bug fix */
9090         if (tg3_flag(tp, 5755_PLUS))
9091                 val |= WDMAC_MODE_STATUS_TAG_FIX;
9092
9093         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9094                 val |= WDMAC_MODE_BURST_ALL_DATA;
9095
9096         tw32_f(WDMAC_MODE, val);
9097         udelay(40);
9098
9099         if (tg3_flag(tp, PCIX_MODE)) {
9100                 u16 pcix_cmd;
9101
9102                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9103                                      &pcix_cmd);
9104                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9105                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9106                         pcix_cmd |= PCI_X_CMD_READ_2K;
9107                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9108                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9109                         pcix_cmd |= PCI_X_CMD_READ_2K;
9110                 }
9111                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9112                                       pcix_cmd);
9113         }
9114
9115         tw32_f(RDMAC_MODE, rdmac_mode);
9116         udelay(40);
9117
9118         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9119         if (!tg3_flag(tp, 5705_PLUS))
9120                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9121
9122         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9123                 tw32(SNDDATAC_MODE,
9124                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9125         else
9126                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9127
9128         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9129         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9130         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9131         if (tg3_flag(tp, LRG_PROD_RING_CAP))
9132                 val |= RCVDBDI_MODE_LRG_RING_SZ;
9133         tw32(RCVDBDI_MODE, val);
9134         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9135         if (tg3_flag(tp, HW_TSO_1) ||
9136             tg3_flag(tp, HW_TSO_2) ||
9137             tg3_flag(tp, HW_TSO_3))
9138                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9139         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9140         if (tg3_flag(tp, ENABLE_TSS))
9141                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9142         tw32(SNDBDI_MODE, val);
9143         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9144
9145         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9146                 err = tg3_load_5701_a0_firmware_fix(tp);
9147                 if (err)
9148                         return err;
9149         }
9150
9151         if (tg3_flag(tp, TSO_CAPABLE)) {
9152                 err = tg3_load_tso_firmware(tp);
9153                 if (err)
9154                         return err;
9155         }
9156
9157         tp->tx_mode = TX_MODE_ENABLE;
9158
9159         if (tg3_flag(tp, 5755_PLUS) ||
9160             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9161                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9162
9163         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9164                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9165                 tp->tx_mode &= ~val;
9166                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9167         }
9168
9169         tw32_f(MAC_TX_MODE, tp->tx_mode);
9170         udelay(100);
9171
9172         if (tg3_flag(tp, ENABLE_RSS)) {
9173                 tg3_rss_write_indir_tbl(tp);
9174
9175                 /* Setup the "secret" hash key. */
9176                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9177                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9178                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9179                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9180                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9181                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9182                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9183                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9184                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9185                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9186         }
9187
9188         tp->rx_mode = RX_MODE_ENABLE;
9189         if (tg3_flag(tp, 5755_PLUS))
9190                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9191
9192         if (tg3_flag(tp, ENABLE_RSS))
9193                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9194                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
9195                                RX_MODE_RSS_IPV6_HASH_EN |
9196                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
9197                                RX_MODE_RSS_IPV4_HASH_EN |
9198                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
9199
9200         tw32_f(MAC_RX_MODE, tp->rx_mode);
9201         udelay(10);
9202
9203         tw32(MAC_LED_CTRL, tp->led_ctrl);
9204
9205         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9206         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9207                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9208                 udelay(10);
9209         }
9210         tw32_f(MAC_RX_MODE, tp->rx_mode);
9211         udelay(10);
9212
9213         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9214                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9215                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9216                         /* Set drive transmission level to 1.2V  */
9217                         /* only if the signal pre-emphasis bit is not set  */
9218                         val = tr32(MAC_SERDES_CFG);
9219                         val &= 0xfffff000;
9220                         val |= 0x880;
9221                         tw32(MAC_SERDES_CFG, val);
9222                 }
9223                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9224                         tw32(MAC_SERDES_CFG, 0x616000);
9225         }
9226
9227         /* Prevent chip from dropping frames when flow control
9228          * is enabled.
9229          */
9230         if (tg3_flag(tp, 57765_CLASS))
9231                 val = 1;
9232         else
9233                 val = 2;
9234         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9235
9236         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9237             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9238                 /* Use hardware link auto-negotiation */
9239                 tg3_flag_set(tp, HW_AUTONEG);
9240         }
9241
9242         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9243             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9244                 u32 tmp;
9245
9246                 tmp = tr32(SERDES_RX_CTRL);
9247                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9248                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9249                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9250                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9251         }
9252
9253         if (!tg3_flag(tp, USE_PHYLIB)) {
9254                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
9255                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9256                         tp->link_config.speed = tp->link_config.orig_speed;
9257                         tp->link_config.duplex = tp->link_config.orig_duplex;
9258                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
9259                 }
9260
9261                 err = tg3_setup_phy(tp, 0);
9262                 if (err)
9263                         return err;
9264
9265                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9266                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9267                         u32 tmp;
9268
9269                         /* Clear CRC stats. */
9270                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9271                                 tg3_writephy(tp, MII_TG3_TEST1,
9272                                              tmp | MII_TG3_TEST1_CRC_EN);
9273                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9274                         }
9275                 }
9276         }
9277
9278         __tg3_set_rx_mode(tp->dev);
9279
9280         /* Initialize receive rules. */
9281         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9282         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9283         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9284         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9285
9286         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9287                 limit = 8;
9288         else
9289                 limit = 16;
9290         if (tg3_flag(tp, ENABLE_ASF))
9291                 limit -= 4;
9292         switch (limit) {
9293         case 16:
9294                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9295         case 15:
9296                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9297         case 14:
9298                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9299         case 13:
9300                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9301         case 12:
9302                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9303         case 11:
9304                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9305         case 10:
9306                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9307         case 9:
9308                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9309         case 8:
9310                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9311         case 7:
9312                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9313         case 6:
9314                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9315         case 5:
9316                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9317         case 4:
9318                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9319         case 3:
9320                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9321         case 2:
9322         case 1:
9323
9324         default:
9325                 break;
9326         }
9327
9328         if (tg3_flag(tp, ENABLE_APE))
9329                 /* Write our heartbeat update interval to APE. */
9330                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9331                                 APE_HOST_HEARTBEAT_INT_DISABLE);
9332
9333         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9334
9335         return 0;
9336 }
9337
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 *
 * Returns 0 on success or a negative errno from tg3_reset_hw().
 */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
	/* Select the chip clocks before touching the rest of the
	 * hardware (implementation elsewhere in this file). */
	tg3_switch_clocks(tp);

	/* Reset the PCI memory window base so subsequent indirect
	 * register/SRAM accesses start from offset zero. */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Full hardware (re)initialization; a nonzero reset_phy also
	 * resets the PHY as part of the sequence. */
	return tg3_reset_hw(tp, reset_phy);
}
9349
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * Returns 0 on success.  On failure the device is halted and the
 * interface is closed; the (negative) error from tg3_init_hw() is
 * returned.  Note that on the failure path tp->lock is temporarily
 * dropped and re-acquired — hence the sparse annotations below.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		/* Re-init failed: shut the chip down, then tear the
		 * interface down entirely. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		/* Drop tp->lock around the teardown calls below (see
		 * the __releases/__acquires annotations); dev_close()
		 * and del_timer_sync() are called unlocked here. */
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		/* Re-acquire tp->lock so the caller sees the same
		 * locking state it started with. */
		tg3_full_lock(tp, 0);
	}
	return err;
}
9373
/* Deferred-work handler (tp->reset_task): fully halts and
 * re-initializes the chip after an error such as a tx timeout.
 * Clears RESET_TASK_PENDING when done, and bails out early if the
 * interface is no longer running.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	/* NOTE(review): phy/netif stop run outside tp->lock —
	 * presumably because they may sleep; confirm.
	 */
	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		/* A tx recovery was requested: switch to flushed mailbox
		 * writes with write-reordering handling before the reinit.
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	/* tg3_phy_start(), like tg3_phy_stop(), runs unlocked. */
	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
9417
/* Add the chip's 32-bit counter at REG into the 64-bit software
 * accumulator PSTAT, propagating a carry into .high when .low wraps.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
9424
/* Fold the chip's 32-bit MAC/receive-list statistics counters into
 * the 64-bit accumulators in tp->hw_stats.  Called once per second
 * from tg3_timer() under tp->lock; does nothing while the link is
 * down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!netif_carrier_ok(tp->dev))
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		/* NOTE(review): on 5717 and early 5719/5720 revs the
		 * RCVLPC discard counter is apparently not usable;
		 * approximate rx_discards by counting mbuf low-watermark
		 * attention events instead, acknowledging each event by
		 * writing the bit back (write-to-clear pattern — confirm).
		 */
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		/* Mirror the approximation into its own statistic. */
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
9479
/* Work around occasionally-missed MSIs on affected chips: if a ring
 * has work pending and its consumer indices have not moved since the
 * previous timer tick, invoke the MSI handler by hand.  Called from
 * tg3_timer() under tp->lock.
 */
static void tg3_chk_missed_msi(struct tg3 *tp)
{
	u32 i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_has_work(tnapi)) {
			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
			    tnapi->last_tx_cons == tnapi->tx_cons) {
				if (tnapi->chk_msi_cnt < 1) {
					/* First stalled tick: give the real
					 * interrupt one more interval.  Note
					 * this returns (not continue), so
					 * later rings also wait for the next
					 * tick.
					 */
					tnapi->chk_msi_cnt++;
					return;
				}
				/* Second stalled tick: fake the interrupt. */
				tg3_msi(0, tnapi);
			}
		}
		/* Progress (or no work): re-arm the stall detector. */
		tnapi->chk_msi_cnt = 0;
		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
		tnapi->last_tx_cons = tnapi->tx_cons;
	}
}
9502
/* Per-device timer callback, re-armed every tp->timer_offset jiffies.
 * Responsibilities: nudge the interrupt path on non-tagged-status
 * chips, check for missed MSIs on affected ASICs, gather statistics
 * and poll link state once per second, and send the ASF heartbeat
 * every two seconds.  Skips all work (but still re-arms) while an
 * irq sync or reset task is in flight.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
		goto restart_timer;

	spin_lock(&tp->lock);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			/* Status block updated: force an interrupt. */
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			/* Otherwise kick the coalescing engine now. */
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			/* Write DMA engine stopped unexpectedly —
			 * schedule a full chip reset.
			 */
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		/* Deferred EEE enable countdown (set elsewhere). */
		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			/* Poll link state via the MAC status register. */
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					/* Momentarily clear the port-mode
					 * bits, presumably to force the
					 * SERDES link logic to re-sync —
					 * confirm against chip docs.
					 */
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
9629
9630 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9631 {
9632         irq_handler_t fn;
9633         unsigned long flags;
9634         char *name;
9635         struct tg3_napi *tnapi = &tp->napi[irq_num];
9636
9637         if (tp->irq_cnt == 1)
9638                 name = tp->dev->name;
9639         else {
9640                 name = &tnapi->irq_lbl[0];
9641                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9642                 name[IFNAMSIZ-1] = 0;
9643         }
9644
9645         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9646                 fn = tg3_msi;
9647                 if (tg3_flag(tp, 1SHOT_MSI))
9648                         fn = tg3_msi_1shot;
9649                 flags = 0;
9650         } else {
9651                 fn = tg3_interrupt;
9652                 if (tg3_flag(tp, TAGGED_STATUS))
9653                         fn = tg3_interrupt_tagged;
9654                 flags = IRQF_SHARED;
9655         }
9656
9657         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9658 }
9659
/* Verify that the chip can actually deliver an interrupt: swap in the
 * tg3_test_isr handler on vector 0, force a coalescing-now event, and
 * poll for up to ~50ms for evidence that the ISR ran.  Restores the
 * normal handler before returning.  Returns 0 if an interrupt was
 * observed, -EIO if not, or a request_irq() error.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	/* NOTE(review): if this request_irq() fails we return with no
	 * handler installed and one-shot mode still disabled — confirm
	 * callers treat this as fatal and close the device.
	 */
	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Force an immediate coalescing event so the ISR should fire. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		/* A non-zero mailbox or a masked PCI interrupt is taken
		 * as evidence the interrupt was delivered.
		 */
		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		/* Acknowledge the latest status tag so the chip can
		 * raise another interrupt.
		 */
		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/* Restore the normal production handler. */
	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
9733
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 *
 * SERR reporting is temporarily suppressed so an MSI cycle ending in
 * Master Abort does not escalate; after falling back to INTx the chip
 * is reset for the same reason.
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	/* Nothing to test unless we are actually in MSI mode. */
	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the original PCI command word. */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
9794
9795 static int tg3_request_firmware(struct tg3 *tp)
9796 {
9797         const __be32 *fw_data;
9798
9799         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9800                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9801                            tp->fw_needed);
9802                 return -ENOENT;
9803         }
9804
9805         fw_data = (void *)tp->fw->data;
9806
9807         /* Firmware blob starts with version numbers, followed by
9808          * start address and _full_ length including BSS sections
9809          * (which must be longer than the actual data, of course
9810          */
9811
9812         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
9813         if (tp->fw_len < (tp->fw->size - 12)) {
9814                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9815                            tp->fw_len, tp->fw_needed);
9816                 release_firmware(tp->fw);
9817                 tp->fw = NULL;
9818                 return -EINVAL;
9819         }
9820
9821         /* We no longer need firmware; we have it. */
9822         tp->fw_needed = NULL;
9823         return 0;
9824 }
9825
/* Try to switch the device into MSI-X mode with roughly one rx vector
 * per online CPU (in multiqueue mode vector 0 only handles link/misc
 * events, so one extra vector is requested).  Fills in each
 * napi[].irq_vec, sets the real rx/tx queue counts, and raises
 * ENABLE_RSS/ENABLE_TSS when multiple vectors were granted.  Returns
 * false if MSI-X could not be enabled (caller falls back to MSI/INTx).
 */
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc;
	/* NOTE(review): VLA sized by tp->irq_max — assumed small
	 * (bounded by the chip's vector limit); confirm stack usage.
	 */
	struct msix_entry msix_ent[tp->irq_max];

	tp->irq_cnt = num_online_cpus();
	if (tp->irq_cnt > 1) {
		/* We want as many rx rings enabled as there are cpus.
		 * In multiqueue MSI-X mode, the first MSI-X vector
		 * only deals with link interrupts, etc, so we add
		 * one to the number of vectors we are requesting.
		 */
		tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
	}

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		/* Fewer vectors available than requested: retry with
		 * the count the PCI core reported it can grant.
		 */
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	netif_set_real_num_tx_queues(tp->dev, 1);
	/* One rx queue per vector beyond the link/misc vector. */
	rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
	if (netif_set_real_num_rx_queues(tp->dev, rc)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	if (tp->irq_cnt > 1) {
		tg3_flag_set(tp, ENABLE_RSS);

		/* 5719/5720 additionally support multiple tx queues. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
			tg3_flag_set(tp, ENABLE_TSS);
			netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
		}
	}

	return true;
}
9879
/* Select the device's interrupt mode: MSI-X when supported (multiple
 * vectors via tg3_enable_msix()), otherwise MSI, otherwise legacy
 * INTx.  Programs MSGINT_MODE for message-signalled modes and falls
 * back to a single-vector configuration when MSI-X is not in use.
 */
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		/* One-shot MSI is opt-in; otherwise disable it in hw. */
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		/* Single-vector fallback (MSI or INTx): one rx/tx queue
		 * on the device's legacy irq line.
		 */
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
9913
9914 static void tg3_ints_fini(struct tg3 *tp)
9915 {
9916         if (tg3_flag(tp, USING_MSIX))
9917                 pci_disable_msix(tp->pdev);
9918         else if (tg3_flag(tp, USING_MSI))
9919                 pci_disable_msi(tp->pdev);
9920         tg3_flag_clear(tp, USING_MSI);
9921         tg3_flag_clear(tp, USING_MSIX);
9922         tg3_flag_clear(tp, ENABLE_RSS);
9923         tg3_flag_clear(tp, ENABLE_TSS);
9924 }
9925
/* net_device_ops .ndo_open handler.  Loads firmware if the chip needs
 * it, powers the device up, configures interrupt vectors and NAPI,
 * allocates the descriptor rings, initializes the hardware, validates
 * MSI delivery, and starts the per-device timer.  All acquired
 * resources are unwound via the err_out* labels on failure.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, err;

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
			/* On 5701 A0 a firmware failure is fatal. */
			if (err)
				return err;
		} else if (err) {
			/* Other chips only lose TSO offload. */
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	netif_carrier_off(tp->dev);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto err_out1;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	/* Attach a handler to every vector, unwinding on partial failure. */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			for (i--; i >= 0; i--) {
				tnapi = &tp->napi[i];
				free_irq(tnapi->irq_vec, tnapi);
			}
			goto err_out2;
		}
	}

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		/* Tagged-status chips (except 5717 and the 57765 class)
		 * only need the timer at 1Hz; the rest tick at 10Hz.
		 */
		if (tg3_flag(tp, TAGGED_STATUS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
		    !tg3_flag(tp, 57765_CLASS))
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		/* ASF heartbeat fires every other one-second tick. */
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 2);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err)
		goto err_out3;

	if (tg3_flag(tp, USING_MSI)) {
		/* Confirm MSI delivery actually works; tg3_test_msi()
		 * falls back to INTx internally when it can.
		 */
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto err_out2;
		}

		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_full_lock(tp, 0);

	add_timer(&tp->timer);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

err_out3:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

err_out2:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

err_out1:
	tg3_ints_fini(tp);
	tg3_frob_aux_power(tp, false);
	pci_set_power_state(tp->pdev, PCI_D3hot);
	return err;
}
10078
/* net_device_ops .ndo_stop handler — the inverse of tg3_open().
 * Stops NAPI and the timer, halts the chip, releases IRQ vectors,
 * rings and DMA memory, then powers the device down.  Always
 * returns 0.
 */
static int tg3_close(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	tg3_napi_disable(tp);
	/* Ensure no reset work is queued or running during teardown. */
	tg3_reset_task_cancel(tp);

	netif_tx_stop_all_queues(dev);

	del_timer_sync(&tp->timer);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	/* Clear stats across close / open calls */
	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);

	tg3_power_down(tp);

	netif_carrier_off(tp->dev);

	return 0;
}
10124
10125 static inline u64 get_stat64(tg3_stat64_t *val)
10126 {
10127        return ((u64)val->high << 32) | ((u64)val->low);
10128 }
10129
/* Return the cumulative receive CRC error count.  On 5700/5701 copper
 * devices the count is read from the PHY's TEST1 CRC counter and
 * accumulated in software (tp->phy_crc_errors); every other device
 * uses the MAC's rx_fcs_errors hardware statistic.
 */
static u64 calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		/* Serialize MDIO access against other tp->lock users. */
		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			/* Enable the CRC counter, then read its value.
			 * NOTE(review): the read-out is added to a running
			 * total below, which presumes the PHY counter
			 * clears on read — confirm against PHY docs.
			 */
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
10155
/* Compute one cumulative ethtool statistic: the snapshot saved across
 * the last close/open (old_estats) plus the live hardware counter.
 * Relies on estats, old_estats and hw_stats being in scope at the
 * expansion site (see tg3_get_estats()).
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)
10159
/* Fill @estats with the driver's cumulative ethtool statistics: each
 * member is the value snapshotted in tp->estats_prev plus the current
 * hardware counter.  The caller must guarantee tp->hw_stats is non-NULL
 * (tg3_get_ethtool_stats() checks this before calling).  Returns @estats
 * for caller convenience.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp,
					       struct tg3_ethtool_stats *estats)
{
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	/* Receive-path counters */
	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	/* RX size-histogram buckets */
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	/* Transmit-path counters */
	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	/* TX collision-count histogram */
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	/* Receive-list placement state machine counters */
	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	/* Send data initiator counters */
	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	/* Host coalescing counters */
	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);

	return estats;
}
10246
/* ndo_get_stats64 handler: report cumulative netdev statistics.
 * Each field combines the snapshot saved across the last chip reset
 * (tp->net_stats_prev) with the live hardware counter block.  If the
 * hardware stats block has not been mapped yet, only the old snapshot
 * can be reported.
 */
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	/* Packet totals are the sum of the unicast/multicast/broadcast
	 * hardware counters.
	 */
	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	/* TX errors aggregate several distinct failure counters. */
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	/* Both oversize and undersize frames count as length errors. */
	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* calc_crc_errors() may also poll a PHY counter under tp->lock. */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	/* Soft drop counters maintained by the driver itself. */
	stats->rx_dropped = tp->rx_dropped;
	stats->tx_dropped = tp->tx_dropped;

	return stats;
}
10309
10310 static int tg3_get_regs_len(struct net_device *dev)
10311 {
10312         return TG3_REG_BLK_SIZE;
10313 }
10314
/* ethtool get_regs handler: dump the legacy register block into @_p.
 * The buffer is zeroed up front, so if the device is flagged as being
 * in a low-power state we return early and the dump is all-zero
 * (NOTE(review): presumably register reads are unsafe in that state --
 * confirm against tg3 power management code).
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	/* Serialize against the rest of the driver while reading. */
	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}
10333
10334 static int tg3_get_eeprom_len(struct net_device *dev)
10335 {
10336         struct tg3 *tp = netdev_priv(dev);
10337
10338         return tp->nvram_size;
10339 }
10340
/* ethtool get_eeprom handler: copy @eeprom->len bytes of NVRAM starting
 * at @eeprom->offset into @data.  NVRAM is only addressable in 4-byte
 * units, so the read is split into an unaligned head, an aligned middle,
 * and an unaligned tail.  eeprom->len is updated to the number of bytes
 * actually copied, even on a partial failure.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count;
	__be32 val;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	/* NVRAM is not accessible while in a low-power state. */
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;	/* becomes the running count of bytes copied */

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		/* Read the whole word containing the unaligned head and
		 * copy out just the requested bytes.
		 */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			/* Report how much was copied before the failure. */
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
10403
/* ethtool set_eeprom handler: write @eeprom->len bytes of @data to NVRAM
 * at @eeprom->offset.  NVRAM writes must be 4-byte aligned, so unaligned
 * head/tail bytes are handled read-modify-write: the surrounding words
 * are read first and merged with the user data in a temporary buffer.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start, end;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	/* Require the caller to echo back the magic from get_eeprom. */
	if (tg3_flag(tp, NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	/* Assignment-in-condition: b_offset is the misalignment (0-3). */
	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		/* Build an aligned scratch buffer: preserved head word,
		 * preserved tail word, user data in between.
		 */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
10462
/* ethtool get_settings handler.  When phylib manages the PHY the query
 * is delegated to phy_ethtool_gset(); otherwise the supported and
 * advertised masks are built from the driver's phy_flags and link
 * configuration.  Speed/duplex/lp_advertising are only meaningful while
 * the link is up.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_gset(phydev, cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	/* Copper PHYs support 10/100 and TP; SERDES devices are fibre. */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	cmd->advertising = tp->link_config.advertising;
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		/* Map the FLOW_CTRL_{RX,TX} pair onto the standard
		 * Pause/Asym_Pause advertisement encoding.
		 */
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				cmd->advertising |= ADVERTISED_Pause;
			} else {
				cmd->advertising |= ADVERTISED_Pause |
						    ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			cmd->advertising |= ADVERTISED_Asym_Pause;
		}
	}
	if (netif_running(dev) && netif_carrier_ok(dev)) {
		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
		cmd->duplex = tp->link_config.active_duplex;
		cmd->lp_advertising = tp->link_config.rmt_adv;
		/* MDI/MDI-X state only applies to copper links. */
		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
				cmd->eth_tp_mdix = ETH_TP_MDI_X;
			else
				cmd->eth_tp_mdix = ETH_TP_MDI;
		}
	} else {
		/* Link down: report everything as unknown/invalid. */
		ethtool_cmd_speed_set(cmd, SPEED_INVALID);
		cmd->duplex = DUPLEX_INVALID;
		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
	}
	cmd->phy_address = tp->phy_addr;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
10528
/* ethtool set_settings handler.  With phylib the request is delegated
 * to phy_ethtool_sset().  Otherwise the request is validated against
 * what the device can actually do, then committed to tp->link_config
 * under the full lock, and the PHY is reprogrammed if the interface is
 * running.
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_sset(phydev, cmd);
	}

	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	/* Forced mode needs an explicit, valid duplex setting. */
	if (cmd->autoneg == AUTONEG_DISABLE &&
	    cmd->duplex != DUPLEX_FULL &&
	    cmd->duplex != DUPLEX_HALF)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		/* Build the set of advertisement bits this device can
		 * honor, then reject any request outside it.
		 */
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		if (cmd->advertising & ~mask)
			return -EINVAL;

		/* Keep only the link-mode bits; autoneg/pause/port bits
		 * are managed separately by the driver.
		 */
		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		cmd->advertising &= mask;
	} else {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			/* SERDES links only run 1000/full when forced. */
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	/* Commit the validated configuration. */
	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_INVALID;
		tp->link_config.duplex = DUPLEX_INVALID;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->duplex;
	}

	/* Remember the requested settings so they can be restored
	 * after power transitions.
	 */
	tp->link_config.orig_speed = tp->link_config.speed;
	tp->link_config.orig_duplex = tp->link_config.duplex;
	tp->link_config.orig_autoneg = tp->link_config.autoneg;

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}
10619
10620 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10621 {
10622         struct tg3 *tp = netdev_priv(dev);
10623
10624         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
10625         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
10626         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
10627         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
10628 }
10629
10630 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10631 {
10632         struct tg3 *tp = netdev_priv(dev);
10633
10634         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10635                 wol->supported = WAKE_MAGIC;
10636         else
10637                 wol->supported = 0;
10638         wol->wolopts = 0;
10639         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10640                 wol->wolopts = WAKE_MAGIC;
10641         memset(&wol->sopass, 0, sizeof(wol->sopass));
10642 }
10643
10644 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10645 {
10646         struct tg3 *tp = netdev_priv(dev);
10647         struct device *dp = &tp->pdev->dev;
10648
10649         if (wol->wolopts & ~WAKE_MAGIC)
10650                 return -EINVAL;
10651         if ((wol->wolopts & WAKE_MAGIC) &&
10652             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10653                 return -EINVAL;
10654
10655         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10656
10657         spin_lock_bh(&tp->lock);
10658         if (device_may_wakeup(dp))
10659                 tg3_flag_set(tp, WOL_ENABLE);
10660         else
10661                 tg3_flag_clear(tp, WOL_ENABLE);
10662         spin_unlock_bh(&tp->lock);
10663
10664         return 0;
10665 }
10666
10667 static u32 tg3_get_msglevel(struct net_device *dev)
10668 {
10669         struct tg3 *tp = netdev_priv(dev);
10670         return tp->msg_enable;
10671 }
10672
10673 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10674 {
10675         struct tg3 *tp = netdev_priv(dev);
10676         tp->msg_enable = value;
10677 }
10678
/* ethtool nway_reset handler: restart link autonegotiation.
 * Not supported on SERDES PHYs.  With phylib attached the request is
 * delegated to phy_start_aneg(); otherwise autonegotiation is
 * restarted by poking BMCR directly.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		/* NOTE(review): BMCR is read twice back-to-back; the first
		 * read's result is discarded and looks like a dummy/flush
		 * read -- confirm whether the PHY requires it or it is
		 * simply redundant before removing.
		 */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			/* Restart (and force-enable) autonegotiation. */
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
10712
10713 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10714 {
10715         struct tg3 *tp = netdev_priv(dev);
10716
10717         ering->rx_max_pending = tp->rx_std_ring_mask;
10718         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10719                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10720         else
10721                 ering->rx_jumbo_max_pending = 0;
10722
10723         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10724
10725         ering->rx_pending = tp->rx_pending;
10726         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10727                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10728         else
10729                 ering->rx_jumbo_pending = 0;
10730
10731         ering->tx_pending = tp->napi[0].tx_pending;
10732 }
10733
/* ethtool set_ringparam handler: change RX/TX ring sizes.  Requires a
 * full device halt/restart when the interface is running, since the
 * rings must be reallocated.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;

	/* Validate against hardware limits; the TX ring must also be
	 * large enough to hold a maximally-fragmented skb (3x on
	 * TSO_BUG chips where frames may need to be segmented in the
	 * driver).
	 */
	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	/* Quiesce the device before touching the ring configuration. */
	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips only support up to 64 pending RX descriptors. */
	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	/* The same TX ring size is applied to every NAPI context. */
	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
10779
10780 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10781 {
10782         struct tg3 *tp = netdev_priv(dev);
10783
10784         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10785
10786         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
10787                 epause->rx_pause = 1;
10788         else
10789                 epause->rx_pause = 0;
10790
10791         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
10792                 epause->tx_pause = 1;
10793         else
10794                 epause->tx_pause = 0;
10795 }
10796
/* ethtool set_pauseparam handler: configure RX/TX flow control and
 * whether it is autonegotiated.  Two paths: with phylib the pause
 * advertisement is pushed into the phy_device (possibly restarting
 * autoneg); without it the flags are updated directly and the device
 * is restarted if running.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

		/* Asymmetric settings need Asym_Pause support in the PHY. */
		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		/* Translate rx/tx pause into FLOW_CTRL_* flags and the
		 * corresponding Pause/Asym_Pause advertisement bits.
		 */
		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			/* Forced flow control takes effect immediately. */
			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			/* PHY not connected yet: stash the advertisement
			 * so it is applied when the PHY comes up.
			 */
			tp->link_config.orig_advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.orig_advertising |= newadv;
		}
	} else {
		int irq_sync = 0;

		/* Legacy path: quiesce, update flags, restart device. */
		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
10898
10899 static int tg3_get_sset_count(struct net_device *dev, int sset)
10900 {
10901         switch (sset) {
10902         case ETH_SS_TEST:
10903                 return TG3_NUM_TEST;
10904         case ETH_SS_STATS:
10905                 return TG3_NUM_STATS;
10906         default:
10907                 return -EOPNOTSUPP;
10908         }
10909 }
10910
10911 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
10912                          u32 *rules __always_unused)
10913 {
10914         struct tg3 *tp = netdev_priv(dev);
10915
10916         if (!tg3_flag(tp, SUPPORT_MSIX))
10917                 return -EOPNOTSUPP;
10918
10919         switch (info->cmd) {
10920         case ETHTOOL_GRXRINGS:
10921                 if (netif_running(tp->dev))
10922                         info->data = tp->irq_cnt;
10923                 else {
10924                         info->data = num_online_cpus();
10925                         if (info->data > TG3_IRQ_MAX_VECS_RSS)
10926                                 info->data = TG3_IRQ_MAX_VECS_RSS;
10927                 }
10928
10929                 /* The first interrupt vector only
10930                  * handles link interrupts.
10931                  */
10932                 info->data -= 1;
10933                 return 0;
10934
10935         default:
10936                 return -EOPNOTSUPP;
10937         }
10938 }
10939
10940 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
10941 {
10942         u32 size = 0;
10943         struct tg3 *tp = netdev_priv(dev);
10944
10945         if (tg3_flag(tp, SUPPORT_MSIX))
10946                 size = TG3_RSS_INDIR_TBL_SIZE;
10947
10948         return size;
10949 }
10950
10951 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
10952 {
10953         struct tg3 *tp = netdev_priv(dev);
10954         int i;
10955
10956         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10957                 indir[i] = tp->rss_ind_tbl[i];
10958
10959         return 0;
10960 }
10961
10962 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
10963 {
10964         struct tg3 *tp = netdev_priv(dev);
10965         size_t i;
10966
10967         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10968                 tp->rss_ind_tbl[i] = indir[i];
10969
10970         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
10971                 return 0;
10972
10973         /* It is legal to write the indirection
10974          * table while the device is running.
10975          */
10976         tg3_full_lock(tp, 0);
10977         tg3_rss_write_indir_tbl(tp);
10978         tg3_full_unlock(tp);
10979
10980         return 0;
10981 }
10982
10983 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10984 {
10985         switch (stringset) {
10986         case ETH_SS_STATS:
10987                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10988                 break;
10989         case ETH_SS_TEST:
10990                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10991                 break;
10992         default:
10993                 WARN_ON(1);     /* we need a WARN() */
10994                 break;
10995         }
10996 }
10997
/* ethtool set_phys_id hook: drive the port LEDs so a user can
 * physically identify this adapter.
 *
 * Returns 1 from ETHTOOL_ID_ACTIVE so the ethtool core alternates
 * ETHTOOL_ID_ON/ETHTOOL_ID_OFF calls once per second, and -EAGAIN
 * when the interface is down.
 */
static int tg3_set_phys_id(struct net_device *dev,
			    enum ethtool_phys_id_state state)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(tp->dev))
		return -EAGAIN;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		/* Override automatic link/traffic sensing and force
		 * every speed LED plus the traffic LED on.
		 */
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_1000MBPS_ON |
		     LED_CTRL_100MBPS_ON |
		     LED_CTRL_10MBPS_ON |
		     LED_CTRL_TRAFFIC_OVERRIDE |
		     LED_CTRL_TRAFFIC_BLINK |
		     LED_CTRL_TRAFFIC_LED);
		break;

	case ETHTOOL_ID_OFF:
		/* Keep the overrides asserted with no LED bits set,
		 * i.e. all LEDs forced off.
		 */
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_TRAFFIC_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore the LED control value saved by the driver. */
		tw32(MAC_LED_CTRL, tp->led_ctrl);
		break;
	}

	return 0;
}
11032
11033 static void tg3_get_ethtool_stats(struct net_device *dev,
11034                                    struct ethtool_stats *estats, u64 *tmp_stats)
11035 {
11036         struct tg3 *tp = netdev_priv(dev);
11037
11038         if (tp->hw_stats)
11039                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11040         else
11041                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11042 }
11043
/* Read the adapter's VPD (Vital Product Data) block into a freshly
 * kmalloc()ed buffer.
 *
 * When the NVRAM uses the standard tg3 EEPROM format, the NVRAM
 * directory is scanned for an extended-VPD entry, which supplies the
 * block's offset and length; if none is found, a fixed legacy
 * offset/length is used.  Devices without usable tg3-format NVRAM
 * fall back to reading VPD through PCI config space.
 *
 * On success returns the buffer and stores its length in *vpdlen;
 * the caller must kfree() it.  Returns NULL on any failure.
 */
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		/* Walk the NVRAM directory looking for an extended
		 * VPD entry.
		 */
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			/* Entry found: length is in 4-byte units; the
			 * following word holds the data offset.
			 */
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	if (!offset || !len) {
		/* No extended VPD entry; use the legacy location. */
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		/* Non-tg3-format NVRAM: pull the VPD through PCI config
		 * space.  pci_read_vpd() may return short reads, so
		 * retry up to three times, treating timeout/interrupt
		 * as a zero-length read.
		 */
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}
11119
#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
#define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
#define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
#define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c

/* ethtool NVRAM self-test.
 *
 * Validates the NVRAM image according to the format identified by its
 * magic word: a byte-sum check for selfboot-FW images, a per-byte
 * parity check for selfboot-HW images, or CRC checks of the bootstrap
 * and manufacturing blocks plus the VPD read-only-section checksum
 * for standard tg3 EEPROM images.
 *
 * Returns 0 on pass (trivially so for NO_NVRAM devices), -EIO on a
 * read failure or checksum/parity mismatch, -ENOMEM on allocation
 * failure.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic, len;
	__be32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	/* Decide how many bytes to read based on the image format
	 * encoded in the magic word.
	 */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_4:
				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_5:
				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_6:
				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
				break;
			default:
				return -EIO;
			}
		} else
			return 0;	/* unknown selfboot format: don't fail */
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		/* A valid image sums (mod 256) to zero. */
		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				/* Bytes 0 and 8 each carry 7 parity bits
				 * (MSB first); note i is advanced here so
				 * the parity byte is not copied as data.
				 */
				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			} else if (i == 16) {
				int l;
				u8 msk;

				/* Byte 16 carries 6 parity bits, byte 17
				 * carries 8 more.
				 */
				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			/* Each data byte plus its parity bit must have
			 * odd combined parity.
			 */
			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	/* Finally, validate the VPD read-only section checksum if the
	 * CHKSUM keyword is present.
	 */
	buf = tg3_vpd_readblock(tp, &len);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			/* All bytes from the start of VPD through the
			 * checksum byte must sum (mod 256) to zero.
			 */
			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}
11308
11309 #define TG3_SERDES_TIMEOUT_SEC  2
11310 #define TG3_COPPER_TIMEOUT_SEC  6
11311
11312 static int tg3_test_link(struct tg3 *tp)
11313 {
11314         int i, max;
11315
11316         if (!netif_running(tp->dev))
11317                 return -ENODEV;
11318
11319         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11320                 max = TG3_SERDES_TIMEOUT_SEC;
11321         else
11322                 max = TG3_COPPER_TIMEOUT_SEC;
11323
11324         for (i = 0; i < max; i++) {
11325                 if (netif_carrier_ok(tp->dev))
11326                         return 0;
11327
11328                 if (msleep_interruptible(1000))
11329                         break;
11330         }
11331
11332         return -EIO;
11333 }
11334
/* Only test the commonly used registers.
 *
 * ethtool register self-test: for each table entry applicable to this
 * ASIC family, write all-zeros and then all-ones through the register,
 * verifying on each readback that the read-only bits (read_mask) keep
 * their original value and the read/write bits (write_mask) take the
 * written value.  The original register contents are restored on both
 * the success and failure paths.  Returns 0 on pass, -EIO on the first
 * mismatch.
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;	/* applicability flags, see below */
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;	/* bits that must hold their value */
		u32 write_mask;	/* bits that must accept writes */
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },	/* sentinel */
	};

	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries not applicable to this ASIC family. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	/* Restore the register before reporting failure. */
	tw32(offset, save_val);
	return -EIO;
}
11555
11556 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11557 {
11558         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11559         int i;
11560         u32 j;
11561
11562         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11563                 for (j = 0; j < len; j += 4) {
11564                         u32 val;
11565
11566                         tg3_write_mem(tp, offset + j, test_pattern[i]);
11567                         tg3_read_mem(tp, offset + j, &val);
11568                         if (val != test_pattern[i])
11569                                 return -EIO;
11570                 }
11571         }
11572         return 0;
11573 }
11574
/* ethtool memory self-test: run tg3_do_mem_test() over every on-chip
 * memory region listed for this device's ASIC family.  The table is
 * selected by capability flags in priority order (newest family
 * first).  Returns 0 on success or the first region's error code.
 */
static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;	/* start address in NIC-local memory */
		u32 len;	/* region length in bytes */
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}	/* sentinel */
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5717[] = {
		{ 0x00000200, 0x00008},
		{ 0x00010000, 0x0a000},
		{ 0x00020000, 0x13c00},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_57765[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x09800},
		{ 0x00010000, 0x0a000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	/* Most-capable families are checked first so a device matching
	 * several flags gets its most specific table.
	 */
	if (tg3_flag(tp, 5717_PLUS))
		mem_tbl = mem_tbl_5717;
	else if (tg3_flag(tp, 57765_CLASS))
		mem_tbl = mem_tbl_57765;
	else if (tg3_flag(tp, 5755_PLUS))
		mem_tbl = mem_tbl_5755;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tg3_flag(tp, 5705_PLUS))
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
		if (err)
			break;
	}

	return err;
}
11643
#define TG3_TSO_MSS		500

#define TG3_TSO_IP_HDR_LEN	20
#define TG3_TSO_TCP_HDR_LEN	20
#define TG3_TSO_TCP_OPT_LEN	12

/* Canned packet-header template used by the TSO loopback self-test.
 * It begins at the EtherType field (0x0800 = IPv4) and is copied into
 * the test frame right after the two MAC addresses; it is followed by
 * a 20-byte IPv4 header and a TCP header with 12 bytes of options.
 * tg3_run_loopback() patches the IP total-length field (and, on
 * HW-TSO devices, zeroes the TCP checksum) before transmit.
 */
static const u8 tg3_tso_header[] = {
0x08, 0x00,
0x45, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x00,
0x40, 0x06, 0x00, 0x00,
0x0a, 0x00, 0x00, 0x01,
0x0a, 0x00, 0x00, 0x02,
0x0d, 0x00, 0xe0, 0x00,
0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x02, 0x00,
0x80, 0x10, 0x10, 0x00,
0x14, 0x09, 0x00, 0x00,
0x01, 0x01, 0x08, 0x0a,
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,
};
11666
11667 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11668 {
11669         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11670         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11671         u32 budget;
11672         struct sk_buff *skb;
11673         u8 *tx_data, *rx_data;
11674         dma_addr_t map;
11675         int num_pkts, tx_len, rx_len, i, err;
11676         struct tg3_rx_buffer_desc *desc;
11677         struct tg3_napi *tnapi, *rnapi;
11678         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11679
11680         tnapi = &tp->napi[0];
11681         rnapi = &tp->napi[0];
11682         if (tp->irq_cnt > 1) {
11683                 if (tg3_flag(tp, ENABLE_RSS))
11684                         rnapi = &tp->napi[1];
11685                 if (tg3_flag(tp, ENABLE_TSS))
11686                         tnapi = &tp->napi[1];
11687         }
11688         coal_now = tnapi->coal_now | rnapi->coal_now;
11689
11690         err = -EIO;
11691
11692         tx_len = pktsz;
11693         skb = netdev_alloc_skb(tp->dev, tx_len);
11694         if (!skb)
11695                 return -ENOMEM;
11696
11697         tx_data = skb_put(skb, tx_len);
11698         memcpy(tx_data, tp->dev->dev_addr, 6);
11699         memset(tx_data + 6, 0x0, 8);
11700
11701         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11702
11703         if (tso_loopback) {
11704                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11705
11706                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11707                               TG3_TSO_TCP_OPT_LEN;
11708
11709                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11710                        sizeof(tg3_tso_header));
11711                 mss = TG3_TSO_MSS;
11712
11713                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11714                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11715
11716                 /* Set the total length field in the IP header */
11717                 iph->tot_len = htons((u16)(mss + hdr_len));
11718
11719                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11720                               TXD_FLAG_CPU_POST_DMA);
11721
11722                 if (tg3_flag(tp, HW_TSO_1) ||
11723                     tg3_flag(tp, HW_TSO_2) ||
11724                     tg3_flag(tp, HW_TSO_3)) {
11725                         struct tcphdr *th;
11726                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11727                         th = (struct tcphdr *)&tx_data[val];
11728                         th->check = 0;
11729                 } else
11730                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
11731
11732                 if (tg3_flag(tp, HW_TSO_3)) {
11733                         mss |= (hdr_len & 0xc) << 12;
11734                         if (hdr_len & 0x10)
11735                                 base_flags |= 0x00000010;
11736                         base_flags |= (hdr_len & 0x3e0) << 5;
11737                 } else if (tg3_flag(tp, HW_TSO_2))
11738                         mss |= hdr_len << 9;
11739                 else if (tg3_flag(tp, HW_TSO_1) ||
11740                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11741                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11742                 } else {
11743                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11744                 }
11745
11746                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11747         } else {
11748                 num_pkts = 1;
11749                 data_off = ETH_HLEN;
11750         }
11751
11752         for (i = data_off; i < tx_len; i++)
11753                 tx_data[i] = (u8) (i & 0xff);
11754
11755         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11756         if (pci_dma_mapping_error(tp->pdev, map)) {
11757                 dev_kfree_skb(skb);
11758                 return -EIO;
11759         }
11760
11761         val = tnapi->tx_prod;
11762         tnapi->tx_buffers[val].skb = skb;
11763         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11764
11765         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11766                rnapi->coal_now);
11767
11768         udelay(10);
11769
11770         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11771
11772         budget = tg3_tx_avail(tnapi);
11773         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11774                             base_flags | TXD_FLAG_END, mss, 0)) {
11775                 tnapi->tx_buffers[val].skb = NULL;
11776                 dev_kfree_skb(skb);
11777                 return -EIO;
11778         }
11779
11780         tnapi->tx_prod++;
11781
11782         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11783         tr32_mailbox(tnapi->prodmbox);
11784
11785         udelay(10);
11786
11787         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
11788         for (i = 0; i < 35; i++) {
11789                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11790                        coal_now);
11791
11792                 udelay(10);
11793
11794                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11795                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11796                 if ((tx_idx == tnapi->tx_prod) &&
11797                     (rx_idx == (rx_start_idx + num_pkts)))
11798                         break;
11799         }
11800
11801         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
11802         dev_kfree_skb(skb);
11803
11804         if (tx_idx != tnapi->tx_prod)
11805                 goto out;
11806
11807         if (rx_idx != rx_start_idx + num_pkts)
11808                 goto out;
11809
11810         val = data_off;
11811         while (rx_idx != rx_start_idx) {
11812                 desc = &rnapi->rx_rcb[rx_start_idx++];
11813                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11814                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11815
11816                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11817                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11818                         goto out;
11819
11820                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11821                          - ETH_FCS_LEN;
11822
11823                 if (!tso_loopback) {
11824                         if (rx_len != tx_len)
11825                                 goto out;
11826
11827                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11828                                 if (opaque_key != RXD_OPAQUE_RING_STD)
11829                                         goto out;
11830                         } else {
11831                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11832                                         goto out;
11833                         }
11834                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11835                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11836                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
11837                         goto out;
11838                 }
11839
11840                 if (opaque_key == RXD_OPAQUE_RING_STD) {
11841                         rx_data = tpr->rx_std_buffers[desc_idx].data;
11842                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11843                                              mapping);
11844                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11845                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
11846                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11847                                              mapping);
11848                 } else
11849                         goto out;
11850
11851                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11852                                             PCI_DMA_FROMDEVICE);
11853
11854                 rx_data += TG3_RX_OFFSET(tp);
11855                 for (i = data_off; i < rx_len; i++, val++) {
11856                         if (*(rx_data + i) != (u8) (val & 0xff))
11857                                 goto out;
11858                 }
11859         }
11860
11861         err = 0;
11862
11863         /* tg3_free_rings will unmap and free the rx_data */
11864 out:
11865         return err;
11866 }
11867
/* Per-mode failure bits reported through the ethtool loopback-test
 * result slots (the data[] entries filled in by tg3_test_loopback).
 */
#define TG3_STD_LOOPBACK_FAILED		1
#define TG3_JMB_LOOPBACK_FAILED		2
#define TG3_TSO_LOOPBACK_FAILED		4
#define TG3_LOOPBACK_FAILED \
	(TG3_STD_LOOPBACK_FAILED | \
	 TG3_JMB_LOOPBACK_FAILED | \
	 TG3_TSO_LOOPBACK_FAILED)
11875
/* Run the ethtool loopback tests: MAC loopback, internal PHY loopback
 * and, when do_extlpbk is set, external PHY loopback.
 *
 * Failure bits (TG3_*_LOOPBACK_FAILED) are ORed into:
 *   data[0] - MAC loopback results
 *   data[1] - internal PHY loopback results
 *   data[2] - external PHY loopback results (written only if do_extlpbk)
 *
 * NOTE(review): the final err computation reads data[2] even when
 * !do_extlpbk; this relies on the caller having zeroed the data array
 * beforehand (tg3_self_test does) — confirm for any new caller.
 *
 * Returns 0 if all requested tests pass, -EIO otherwise.
 */
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
	int err = -EIO;
	u32 eee_cap;

	/* Mask off the EEE capability for the duration of the test and
	 * restore it on exit.
	 */
	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	/* Interface down: report every requested mode as failed. */
	if (!netif_running(tp->dev)) {
		data[0] = TG3_LOOPBACK_FAILED;
		data[1] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[2] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	err = tg3_reset_hw(tp, 1);
	if (err) {
		data[0] = TG3_LOOPBACK_FAILED;
		data[1] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[2] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* HW errata - mac loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by
	 * errata.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT)) {
		tg3_mac_loopback(tp, true);

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[0] |= TG3_STD_LOOPBACK_FAILED;

		/* Jumbo-frame pass only when the jumbo ring is enabled. */
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
			data[0] |= TG3_JMB_LOOPBACK_FAILED;

		tg3_mac_loopback(tp, false);
	}

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		int i;

		tg3_phy_lpbk_set(tp, 0, false);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[1] |= TG3_STD_LOOPBACK_FAILED;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
			data[1] |= TG3_TSO_LOOPBACK_FAILED;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
			data[1] |= TG3_JMB_LOOPBACK_FAILED;

		if (do_extlpbk) {
			tg3_phy_lpbk_set(tp, 0, true);

			/* All link indications report up, but the hardware
			 * isn't really ready for about 20 msec.  Double it
			 * to be sure.
			 */
			mdelay(40);

			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
				data[2] |= TG3_STD_LOOPBACK_FAILED;
			if (tg3_flag(tp, TSO_CAPABLE) &&
			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
				data[2] |= TG3_TSO_LOOPBACK_FAILED;
			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
			    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
				data[2] |= TG3_JMB_LOOPBACK_FAILED;
		}

		/* Re-enable gphy autopowerdown. */
		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
			tg3_phy_toggle_apd(tp, true);
	}

	err = (data[0] | data[1] | data[2]) ? -EIO : 0;

done:
	tp->phy_flags |= eee_cap;

	return err;
}
11982
/* ethtool .self_test handler.
 *
 * Result slot assignment (TG3_NUM_TEST entries):
 *   data[0] - NVRAM test
 *   data[1] - link test (skipped for external loopback)
 *   data[2] - register test      (offline only)
 *   data[3] - memory test        (offline only)
 *   data[4..6] - loopback tests  (offline only; see tg3_test_loopback)
 *   data[7] - interrupt test     (offline only)
 *
 * Offline testing halts the chip under the full lock, runs the
 * destructive tests, then restarts the hardware and the PHY.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	/* Wake a low-power device first; if that fails, mark every
	 * result slot as failed and bail out.
	 */
	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
	    tg3_power_up(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
		return;
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Quiesce the chip and its on-chip CPUs before poking at
		 * registers and memory directly.
		 */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		if (tg3_test_loopback(tp, &data[4], doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		/* Interrupt test runs outside the full lock. */
		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[7] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Bring the hardware back up if the interface was up. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down(tp);

}
12070
12071 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12072 {
12073         struct mii_ioctl_data *data = if_mii(ifr);
12074         struct tg3 *tp = netdev_priv(dev);
12075         int err;
12076
12077         if (tg3_flag(tp, USE_PHYLIB)) {
12078                 struct phy_device *phydev;
12079                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12080                         return -EAGAIN;
12081                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12082                 return phy_mii_ioctl(phydev, ifr, cmd);
12083         }
12084
12085         switch (cmd) {
12086         case SIOCGMIIPHY:
12087                 data->phy_id = tp->phy_addr;
12088
12089                 /* fallthru */
12090         case SIOCGMIIREG: {
12091                 u32 mii_regval;
12092
12093                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12094                         break;                  /* We have no PHY */
12095
12096                 if (!netif_running(dev))
12097                         return -EAGAIN;
12098
12099                 spin_lock_bh(&tp->lock);
12100                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
12101                 spin_unlock_bh(&tp->lock);
12102
12103                 data->val_out = mii_regval;
12104
12105                 return err;
12106         }
12107
12108         case SIOCSMIIREG:
12109                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12110                         break;                  /* We have no PHY */
12111
12112                 if (!netif_running(dev))
12113                         return -EAGAIN;
12114
12115                 spin_lock_bh(&tp->lock);
12116                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
12117                 spin_unlock_bh(&tp->lock);
12118
12119                 return err;
12120
12121         default:
12122                 /* do nothing */
12123                 break;
12124         }
12125         return -EOPNOTSUPP;
12126 }
12127
12128 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12129 {
12130         struct tg3 *tp = netdev_priv(dev);
12131
12132         memcpy(ec, &tp->coal, sizeof(*ec));
12133         return 0;
12134 }
12135
12136 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12137 {
12138         struct tg3 *tp = netdev_priv(dev);
12139         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12140         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12141
12142         if (!tg3_flag(tp, 5705_PLUS)) {
12143                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12144                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12145                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12146                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
12147         }
12148
12149         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12150             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12151             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12152             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12153             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12154             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12155             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12156             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
12157             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
12158             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
12159                 return -EINVAL;
12160
12161         /* No rx interrupts will be generated if both are zero */
12162         if ((ec->rx_coalesce_usecs == 0) &&
12163             (ec->rx_max_coalesced_frames == 0))
12164                 return -EINVAL;
12165
12166         /* No tx interrupts will be generated if both are zero */
12167         if ((ec->tx_coalesce_usecs == 0) &&
12168             (ec->tx_max_coalesced_frames == 0))
12169                 return -EINVAL;
12170
12171         /* Only copy relevant parameters, ignore all others. */
12172         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
12173         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
12174         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
12175         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
12176         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12177         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12178         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12179         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12180         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12181
12182         if (netif_running(dev)) {
12183                 tg3_full_lock(tp, 0);
12184                 __tg3_set_coalesce(tp, &tp->coal);
12185                 tg3_full_unlock(tp);
12186         }
12187         return 0;
12188 }
12189
/* ethtool operations table for tg3 devices; the tg3_get_*/tg3_set_*
 * handlers are defined earlier in this file.
 */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size	= tg3_get_rxfh_indir_size,
	.get_rxfh_indir		= tg3_get_rxfh_indir,
	.set_rxfh_indir		= tg3_set_rxfh_indir,
};
12221
/* net_device_ops .ndo_set_rx_mode handler: apply the rx filter
 * configuration under the full lock, but only while the interface
 * is up.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
12233
12234 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
12235                                int new_mtu)
12236 {
12237         dev->mtu = new_mtu;
12238
12239         if (new_mtu > ETH_DATA_LEN) {
12240                 if (tg3_flag(tp, 5780_CLASS)) {
12241                         netdev_update_features(dev);
12242                         tg3_flag_clear(tp, TSO_CAPABLE);
12243                 } else {
12244                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
12245                 }
12246         } else {
12247                 if (tg3_flag(tp, 5780_CLASS)) {
12248                         tg3_flag_set(tp, TSO_CAPABLE);
12249                         netdev_update_features(dev);
12250                 }
12251                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
12252         }
12253 }
12254
/* net_device_ops .ndo_change_mtu handler.
 *
 * Validates new_mtu against [TG3_MIN_MTU, TG3_MAX_MTU(tp)].  If the
 * interface is down the new MTU is only recorded (it takes effect at
 * the next open); otherwise the device is quiesced, reconfigured via
 * tg3_set_mtu() and restarted.
 *
 * Returns 0 on success or a negative errno.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	err = tg3_restart_hw(tp, 0);

	/* NOTE(review): if tg3_restart_hw() fails, the queues and PHY
	 * are intentionally left stopped and the error is returned.
	 */
	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
12293
/* net_device_ops table for tg3 devices. */
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
12311
12312 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12313 {
12314         u32 cursize, val, magic;
12315
12316         tp->nvram_size = EEPROM_CHIP_SIZE;
12317
12318         if (tg3_nvram_read(tp, 0, &magic) != 0)
12319                 return;
12320
12321         if ((magic != TG3_EEPROM_MAGIC) &&
12322             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12323             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12324                 return;
12325
12326         /*
12327          * Size the chip by reading offsets at increasing powers of two.
12328          * When we encounter our validation signature, we know the addressing
12329          * has wrapped around, and thus have our chip size.
12330          */
12331         cursize = 0x10;
12332
12333         while (cursize < tp->nvram_size) {
12334                 if (tg3_nvram_read(tp, cursize, &val) != 0)
12335                         return;
12336
12337                 if (val == magic)
12338                         break;
12339
12340                 cursize <<= 1;
12341         }
12342
12343         tp->nvram_size = cursize;
12344 }
12345
12346 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12347 {
12348         u32 val;
12349
12350         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12351                 return;
12352
12353         /* Selfboot format */
12354         if (val != TG3_EEPROM_MAGIC) {
12355                 tg3_get_eeprom_size(tp);
12356                 return;
12357         }
12358
12359         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12360                 if (val != 0) {
12361                         /* This is confusing.  We want to operate on the
12362                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
12363                          * call will read from NVRAM and byteswap the data
12364                          * according to the byteswapping settings for all
12365                          * other register accesses.  This ensures the data we
12366                          * want will always reside in the lower 16-bits.
12367                          * However, the data in NVRAM is in LE format, which
12368                          * means the data from the NVRAM read will always be
12369                          * opposite the endianness of the CPU.  The 16-bit
12370                          * byteswap then brings the data to CPU endianness.
12371                          */
12372                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12373                         return;
12374                 }
12375         }
12376         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12377 }
12378
12379 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12380 {
12381         u32 nvcfg1;
12382
12383         nvcfg1 = tr32(NVRAM_CFG1);
12384         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12385                 tg3_flag_set(tp, FLASH);
12386         } else {
12387                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12388                 tw32(NVRAM_CFG1, nvcfg1);
12389         }
12390
12391         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12392             tg3_flag(tp, 5780_CLASS)) {
12393                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12394                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12395                         tp->nvram_jedecnum = JEDEC_ATMEL;
12396                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12397                         tg3_flag_set(tp, NVRAM_BUFFERED);
12398                         break;
12399                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12400                         tp->nvram_jedecnum = JEDEC_ATMEL;
12401                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12402                         break;
12403                 case FLASH_VENDOR_ATMEL_EEPROM:
12404                         tp->nvram_jedecnum = JEDEC_ATMEL;
12405                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12406                         tg3_flag_set(tp, NVRAM_BUFFERED);
12407                         break;
12408                 case FLASH_VENDOR_ST:
12409                         tp->nvram_jedecnum = JEDEC_ST;
12410                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12411                         tg3_flag_set(tp, NVRAM_BUFFERED);
12412                         break;
12413                 case FLASH_VENDOR_SAIFUN:
12414                         tp->nvram_jedecnum = JEDEC_SAIFUN;
12415                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12416                         break;
12417                 case FLASH_VENDOR_SST_SMALL:
12418                 case FLASH_VENDOR_SST_LARGE:
12419                         tp->nvram_jedecnum = JEDEC_SST;
12420                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12421                         break;
12422                 }
12423         } else {
12424                 tp->nvram_jedecnum = JEDEC_ATMEL;
12425                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12426                 tg3_flag_set(tp, NVRAM_BUFFERED);
12427         }
12428 }
12429
12430 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12431 {
12432         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12433         case FLASH_5752PAGE_SIZE_256:
12434                 tp->nvram_pagesize = 256;
12435                 break;
12436         case FLASH_5752PAGE_SIZE_512:
12437                 tp->nvram_pagesize = 512;
12438                 break;
12439         case FLASH_5752PAGE_SIZE_1K:
12440                 tp->nvram_pagesize = 1024;
12441                 break;
12442         case FLASH_5752PAGE_SIZE_2K:
12443                 tp->nvram_pagesize = 2048;
12444                 break;
12445         case FLASH_5752PAGE_SIZE_4K:
12446                 tp->nvram_pagesize = 4096;
12447                 break;
12448         case FLASH_5752PAGE_SIZE_264:
12449                 tp->nvram_pagesize = 264;
12450                 break;
12451         case FLASH_5752PAGE_SIZE_528:
12452                 tp->nvram_pagesize = 528;
12453                 break;
12454         }
12455 }
12456
/* Decode NVRAM_CFG1 for 5752 devices: record the TPM protection flag,
 * the NVRAM vendor (JEDEC id), buffering/flash mode and page size.
 * For EEPROM parts the compat-bypass bit is cleared and the config
 * written back.
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* Clear the compat-bypass bit and write the config back. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
12497
12498 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12499 {
12500         u32 nvcfg1, protect = 0;
12501
12502         nvcfg1 = tr32(NVRAM_CFG1);
12503
12504         /* NVRAM protection for TPM */
12505         if (nvcfg1 & (1 << 27)) {
12506                 tg3_flag_set(tp, PROTECTED_NVRAM);
12507                 protect = 1;
12508         }
12509
12510         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12511         switch (nvcfg1) {
12512         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12513         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12514         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12515         case FLASH_5755VENDOR_ATMEL_FLASH_5:
12516                 tp->nvram_jedecnum = JEDEC_ATMEL;
12517                 tg3_flag_set(tp, NVRAM_BUFFERED);
12518                 tg3_flag_set(tp, FLASH);
12519                 tp->nvram_pagesize = 264;
12520                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12521                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12522                         tp->nvram_size = (protect ? 0x3e200 :
12523                                           TG3_NVRAM_SIZE_512KB);
12524                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12525                         tp->nvram_size = (protect ? 0x1f200 :
12526                                           TG3_NVRAM_SIZE_256KB);
12527                 else
12528                         tp->nvram_size = (protect ? 0x1f200 :
12529                                           TG3_NVRAM_SIZE_128KB);
12530                 break;
12531         case FLASH_5752VENDOR_ST_M45PE10:
12532         case FLASH_5752VENDOR_ST_M45PE20:
12533         case FLASH_5752VENDOR_ST_M45PE40:
12534                 tp->nvram_jedecnum = JEDEC_ST;
12535                 tg3_flag_set(tp, NVRAM_BUFFERED);
12536                 tg3_flag_set(tp, FLASH);
12537                 tp->nvram_pagesize = 256;
12538                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12539                         tp->nvram_size = (protect ?
12540                                           TG3_NVRAM_SIZE_64KB :
12541                                           TG3_NVRAM_SIZE_128KB);
12542                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12543                         tp->nvram_size = (protect ?
12544                                           TG3_NVRAM_SIZE_64KB :
12545                                           TG3_NVRAM_SIZE_256KB);
12546                 else
12547                         tp->nvram_size = (protect ?
12548                                           TG3_NVRAM_SIZE_128KB :
12549                                           TG3_NVRAM_SIZE_512KB);
12550                 break;
12551         }
12552 }
12553
12554 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12555 {
12556         u32 nvcfg1;
12557
12558         nvcfg1 = tr32(NVRAM_CFG1);
12559
12560         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12561         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12562         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12563         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12564         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12565                 tp->nvram_jedecnum = JEDEC_ATMEL;
12566                 tg3_flag_set(tp, NVRAM_BUFFERED);
12567                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12568
12569                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12570                 tw32(NVRAM_CFG1, nvcfg1);
12571                 break;
12572         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12573         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12574         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12575         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12576                 tp->nvram_jedecnum = JEDEC_ATMEL;
12577                 tg3_flag_set(tp, NVRAM_BUFFERED);
12578                 tg3_flag_set(tp, FLASH);
12579                 tp->nvram_pagesize = 264;
12580                 break;
12581         case FLASH_5752VENDOR_ST_M45PE10:
12582         case FLASH_5752VENDOR_ST_M45PE20:
12583         case FLASH_5752VENDOR_ST_M45PE40:
12584                 tp->nvram_jedecnum = JEDEC_ST;
12585                 tg3_flag_set(tp, NVRAM_BUFFERED);
12586                 tg3_flag_set(tp, FLASH);
12587                 tp->nvram_pagesize = 256;
12588                 break;
12589         }
12590 }
12591
/* Identify the NVRAM part attached to a 5761-class device.
 *
 * Reads NVRAM_CFG1 to classify the attached part, records the JEDEC
 * vendor and page size in *tp, and derives the device size from the
 * vendor strap.  When the TPM protection bit is set, the usable size is
 * taken from the NVRAM_ADDR_LOCKOUT register instead of the part's
 * nominal capacity.
 */
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		/* Atmel dataflash, addressed without the 264/528-byte
		 * page translation (NO_NVRAM_ADDR_TRANS).
		 */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		/* ST M45PE serial flash, 256-byte pages. */
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		/* TPM-protected: hardware reports the usable size. */
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		/* Unprotected: size follows from the part number. */
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
12666
/* 5906 devices carry a fixed Atmel AT24C512-style buffered EEPROM, so
 * no NVRAM_CFG1 probing is needed; the page size covers the whole chip.
 */
static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}
12673
/* Identify the NVRAM part attached to a 57780-class (or 57765-class)
 * device.
 *
 * Decodes NVRAM_CFG1 into vendor, size and page size.  EEPROM parts
 * return early after clearing the compat bypass bit; flash parts fall
 * through to tg3_nvram_get_pagesize() and disable address translation
 * for page sizes other than 264/528.  Unknown straps set NO_NVRAM.
 */
static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		/* Serial EEPROM: whole-chip page size; write back
		 * NVRAM_CFG1 with compat bypass cleared and return —
		 * the flash page-size probe below does not apply.
		 */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		/* Atmel AT45DB dataflash family. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Device size follows from the specific part. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		/* ST M45PE serial flash family. */
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		/* Unrecognized strap: no usable NVRAM. */
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Only 264/528-byte pages use the dataflash address translation. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
12745
12746
/* Identify the NVRAM part attached to a 5717/5719-class device.
 *
 * Same shape as tg3_get_57780_nvram_info(): decode NVRAM_CFG1, set
 * vendor/size/page-size in *tp, early-return for EEPROMs, and mark
 * NO_NVRAM for unknown straps.  Some parts leave nvram_size at 0 so the
 * caller falls back to tg3_nvram_get_size() detection.
 */
static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		/* Serial EEPROM: whole-chip page size, compat bypass
		 * cleared; the flash page-size probe does not apply.
		 */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		/* Atmel dataflash parts. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		/* ST serial flash parts. */
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		/* Unrecognized strap: no usable NVRAM. */
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Only 264/528-byte pages use the dataflash address translation. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
12824
/* Identify the NVRAM part attached to a 5720-class device.
 *
 * Decodes the pin-strap/vendor field of NVRAM_CFG1.  EEPROM straps
 * return early after clearing compat bypass and choosing the EEPROM
 * page size; flash straps record vendor and size, then fall through to
 * the shared page-size probe.  Unknown straps set NO_NVRAM.
 */
static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		/* Serial EEPROM; HD vs LD straps select the chip size
		 * used as the page size.
		 */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		/* Atmel dataflash parts. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Size follows from the specific part number. */
		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		/* ST serial flash parts. */
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		/* Unrecognized strap: no usable NVRAM. */
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Only 264/528-byte pages use the dataflash address translation. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
12936
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
/* Reset the EEPROM state machine, enable serial-EEPROM access, and
 * dispatch to the per-ASIC NVRAM probe routine under the NVRAM lock.
 * 5700/5701 have no NVRAM interface and use the raw EEPROM path instead.
 */
static void __devinit tg3_nvram_init(struct tg3 *tp)
{
	/* Reset the EEPROM access state machine and program the default
	 * clock period.
	 */
	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		/* NVRAM access must be serialized against firmware. */
		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		/* Probe routines may leave this 0 to request generic
		 * size detection below.
		 */
		tp->nvram_size = 0;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		/* 5700/5701: plain EEPROM only, no NVRAM interface. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
13002
/* One entry of the board-to-PHY lookup table: maps a PCI subsystem
 * (vendor, device) ID pair to the PHY ID known to be on that board.
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;	/* PCI subsystem IDs */
	u32 phy_id;	/* TG3_PHY_ID_*; 0 for entries with no recorded
			 * copper PHY ID (presumably serdes boards —
			 * NOTE(review): verify against callers).
			 */
};
13007
/* Board-specific PHY ID table, keyed by PCI subsystem vendor/device ID
 * and searched linearly by tg3_lookup_by_subsys().
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
13071
13072 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13073 {
13074         int i;
13075
13076         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13077                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13078                      tp->pdev->subsystem_vendor) &&
13079                     (subsys_id_to_phy_id[i].subsys_devid ==
13080                      tp->pdev->subsystem_device))
13081                         return &subsys_id_to_phy_id[i];
13082         }
13083         return NULL;
13084 }
13085
13086 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13087 {
13088         u32 val;
13089
13090         tp->phy_id = TG3_PHY_ID_INVALID;
13091         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13092
13093         /* Assume an onboard device and WOL capable by default.  */
13094         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13095         tg3_flag_set(tp, WOL_CAP);
13096
13097         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13098                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13099                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13100                         tg3_flag_set(tp, IS_NIC);
13101                 }
13102                 val = tr32(VCPU_CFGSHDW);
13103                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
13104                         tg3_flag_set(tp, ASPM_WORKAROUND);
13105                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13106                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13107                         tg3_flag_set(tp, WOL_ENABLE);
13108                         device_set_wakeup_enable(&tp->pdev->dev, true);
13109                 }
13110                 goto done;
13111         }
13112
13113         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13114         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13115                 u32 nic_cfg, led_cfg;
13116                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13117                 int eeprom_phy_serdes = 0;
13118
13119                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13120                 tp->nic_sram_data_cfg = nic_cfg;
13121
13122                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13123                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13124                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13125                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13126                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13127                     (ver > 0) && (ver < 0x100))
13128                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13129
13130                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13131                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13132
13133                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13134                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13135                         eeprom_phy_serdes = 1;
13136
13137                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13138                 if (nic_phy_id != 0) {
13139                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13140                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13141
13142                         eeprom_phy_id  = (id1 >> 16) << 10;
13143                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
13144                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
13145                 } else
13146                         eeprom_phy_id = 0;
13147
13148                 tp->phy_id = eeprom_phy_id;
13149                 if (eeprom_phy_serdes) {
13150                         if (!tg3_flag(tp, 5705_PLUS))
13151                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13152                         else
13153                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13154                 }
13155
13156                 if (tg3_flag(tp, 5750_PLUS))
13157                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13158                                     SHASTA_EXT_LED_MODE_MASK);
13159                 else
13160                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13161
13162                 switch (led_cfg) {
13163                 default:
13164                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13165                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13166                         break;
13167
13168                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13169                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13170                         break;
13171
13172                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13173                         tp->led_ctrl = LED_CTRL_MODE_MAC;
13174
13175                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
13176                          * read on some older 5700/5701 bootcode.
13177                          */
13178                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13179                             ASIC_REV_5700 ||
13180                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
13181                             ASIC_REV_5701)
13182                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13183
13184                         break;
13185
13186                 case SHASTA_EXT_LED_SHARED:
13187                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
13188                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13189                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13190                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13191                                                  LED_CTRL_MODE_PHY_2);
13192                         break;
13193
13194                 case SHASTA_EXT_LED_MAC:
13195                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13196                         break;
13197
13198                 case SHASTA_EXT_LED_COMBO:
13199                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
13200                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13201                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13202                                                  LED_CTRL_MODE_PHY_2);
13203                         break;
13204
13205                 }
13206
13207                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13208                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13209                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13210                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13211
13212                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13213                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13214
13215                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13216                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13217                         if ((tp->pdev->subsystem_vendor ==
13218                              PCI_VENDOR_ID_ARIMA) &&
13219                             (tp->pdev->subsystem_device == 0x205a ||
13220                              tp->pdev->subsystem_device == 0x2063))
13221                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13222                 } else {
13223                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13224                         tg3_flag_set(tp, IS_NIC);
13225                 }
13226
13227                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13228                         tg3_flag_set(tp, ENABLE_ASF);
13229                         if (tg3_flag(tp, 5750_PLUS))
13230                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13231                 }
13232
13233                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13234                     tg3_flag(tp, 5750_PLUS))
13235                         tg3_flag_set(tp, ENABLE_APE);
13236
13237                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13238                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13239                         tg3_flag_clear(tp, WOL_CAP);
13240
13241                 if (tg3_flag(tp, WOL_CAP) &&
13242                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13243                         tg3_flag_set(tp, WOL_ENABLE);
13244                         device_set_wakeup_enable(&tp->pdev->dev, true);
13245                 }
13246
13247                 if (cfg2 & (1 << 17))
13248                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13249
13250                 /* serdes signal pre-emphasis in register 0x590 set by */
13251                 /* bootcode if bit 18 is set */
13252                 if (cfg2 & (1 << 18))
13253                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13254
13255                 if ((tg3_flag(tp, 57765_PLUS) ||
13256                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13257                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13258                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13259                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13260
13261                 if (tg3_flag(tp, PCI_EXPRESS) &&
13262                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13263                     !tg3_flag(tp, 57765_PLUS)) {
13264                         u32 cfg3;
13265
13266                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13267                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13268                                 tg3_flag_set(tp, ASPM_WORKAROUND);
13269                 }
13270
13271                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13272                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13273                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13274                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13275                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13276                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13277         }
13278 done:
13279         if (tg3_flag(tp, WOL_CAP))
13280                 device_set_wakeup_enable(&tp->pdev->dev,
13281                                          tg3_flag(tp, WOL_ENABLE));
13282         else
13283                 device_set_wakeup_capable(&tp->pdev->dev, false);
13284 }
13285
13286 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13287 {
13288         int i;
13289         u32 val;
13290
13291         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13292         tw32(OTP_CTRL, cmd);
13293
13294         /* Wait for up to 1 ms for command to execute. */
13295         for (i = 0; i < 100; i++) {
13296                 val = tr32(OTP_STATUS);
13297                 if (val & OTP_STATUS_CMD_DONE)
13298                         break;
13299                 udelay(10);
13300         }
13301
13302         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13303 }
13304
13305 /* Read the gphy configuration from the OTP region of the chip.  The gphy
13306  * configuration is a 32-bit value that straddles the alignment boundary.
13307  * We do two 32-bit reads and then shift and merge the results.
13308  */
13309 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13310 {
13311         u32 bhalf_otp, thalf_otp;
13312
13313         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13314
13315         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13316                 return 0;
13317
13318         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13319
13320         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13321                 return 0;
13322
13323         thalf_otp = tr32(OTP_READ_DATA);
13324
13325         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13326
13327         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13328                 return 0;
13329
13330         bhalf_otp = tr32(OTP_READ_DATA);
13331
13332         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13333 }
13334
13335 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13336 {
13337         u32 adv = ADVERTISED_Autoneg;
13338
13339         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13340                 adv |= ADVERTISED_1000baseT_Half |
13341                        ADVERTISED_1000baseT_Full;
13342
13343         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13344                 adv |= ADVERTISED_100baseT_Half |
13345                        ADVERTISED_100baseT_Full |
13346                        ADVERTISED_10baseT_Half |
13347                        ADVERTISED_10baseT_Full |
13348                        ADVERTISED_TP;
13349         else
13350                 adv |= ADVERTISED_FIBRE;
13351
13352         tp->link_config.advertising = adv;
13353         tp->link_config.speed = SPEED_INVALID;
13354         tp->link_config.duplex = DUPLEX_INVALID;
13355         tp->link_config.autoneg = AUTONEG_ENABLE;
13356         tp->link_config.active_speed = SPEED_INVALID;
13357         tp->link_config.active_duplex = DUPLEX_INVALID;
13358         tp->link_config.orig_speed = SPEED_INVALID;
13359         tp->link_config.orig_duplex = DUPLEX_INVALID;
13360         tp->link_config.orig_autoneg = AUTONEG_INVALID;
13361 }
13362
/* Determine which PHY is attached and initialize it.
 *
 * Sets the default flow-control policy, reads the PHY ID registers
 * (skipped when ASF/APE management firmware may be using the MDIO
 * bus), and falls back to an eeprom-provided or table-derived PHY ID
 * when the hardware read is not usable.  Marks EEE-capable chips,
 * installs the default link configuration, and -- when no management
 * firmware owns the PHY -- resets the PHY and restarts
 * autonegotiation if the current advertisement differs from the
 * desired one.  Returns 0 on success or a negative errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	/* When phylib manages the PHY, delegate everything to it. */
	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Pack the two MII ID registers into the driver's
		 * internal phy_id layout.
		 */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	/* Copper-only EEE capability on the chip revisions listed
	 * here (flag name suggests Energy Efficient Ethernet).
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
	     (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		/* BMSR is read twice; MII latches link-down, so the
		 * second read reflects the current link state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		/* Restart autoneg only if the PHY's advertised modes
		 * differ from the desired configuration.
		 */
		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		/* Deliberate second DSP init pass for the 5401. */
		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
13473
13474 static void __devinit tg3_read_vpd(struct tg3 *tp)
13475 {
13476         u8 *vpd_data;
13477         unsigned int block_end, rosize, len;
13478         u32 vpdlen;
13479         int j, i = 0;
13480
13481         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13482         if (!vpd_data)
13483                 goto out_no_vpd;
13484
13485         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13486         if (i < 0)
13487                 goto out_not_found;
13488
13489         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13490         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13491         i += PCI_VPD_LRDT_TAG_SIZE;
13492
13493         if (block_end > vpdlen)
13494                 goto out_not_found;
13495
13496         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13497                                       PCI_VPD_RO_KEYWORD_MFR_ID);
13498         if (j > 0) {
13499                 len = pci_vpd_info_field_size(&vpd_data[j]);
13500
13501                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13502                 if (j + len > block_end || len != 4 ||
13503                     memcmp(&vpd_data[j], "1028", 4))
13504                         goto partno;
13505
13506                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13507                                               PCI_VPD_RO_KEYWORD_VENDOR0);
13508                 if (j < 0)
13509                         goto partno;
13510
13511                 len = pci_vpd_info_field_size(&vpd_data[j]);
13512
13513                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13514                 if (j + len > block_end)
13515                         goto partno;
13516
13517                 memcpy(tp->fw_ver, &vpd_data[j], len);
13518                 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
13519         }
13520
13521 partno:
13522         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13523                                       PCI_VPD_RO_KEYWORD_PARTNO);
13524         if (i < 0)
13525                 goto out_not_found;
13526
13527         len = pci_vpd_info_field_size(&vpd_data[i]);
13528
13529         i += PCI_VPD_INFO_FLD_HDR_SIZE;
13530         if (len > TG3_BPN_SIZE ||
13531             (len + i) > vpdlen)
13532                 goto out_not_found;
13533
13534         memcpy(tp->board_part_number, &vpd_data[i], len);
13535
13536 out_not_found:
13537         kfree(vpd_data);
13538         if (tp->board_part_number[0])
13539                 return;
13540
13541 out_no_vpd:
13542         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13543                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13544                         strcpy(tp->board_part_number, "BCM5717");
13545                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13546                         strcpy(tp->board_part_number, "BCM5718");
13547                 else
13548                         goto nomatch;
13549         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13550                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13551                         strcpy(tp->board_part_number, "BCM57780");
13552                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13553                         strcpy(tp->board_part_number, "BCM57760");
13554                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13555                         strcpy(tp->board_part_number, "BCM57790");
13556                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13557                         strcpy(tp->board_part_number, "BCM57788");
13558                 else
13559                         goto nomatch;
13560         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13561                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13562                         strcpy(tp->board_part_number, "BCM57761");
13563                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13564                         strcpy(tp->board_part_number, "BCM57765");
13565                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13566                         strcpy(tp->board_part_number, "BCM57781");
13567                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13568                         strcpy(tp->board_part_number, "BCM57785");
13569                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13570                         strcpy(tp->board_part_number, "BCM57791");
13571                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13572                         strcpy(tp->board_part_number, "BCM57795");
13573                 else
13574                         goto nomatch;
13575         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
13576                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
13577                         strcpy(tp->board_part_number, "BCM57762");
13578                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
13579                         strcpy(tp->board_part_number, "BCM57766");
13580                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
13581                         strcpy(tp->board_part_number, "BCM57782");
13582                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13583                         strcpy(tp->board_part_number, "BCM57786");
13584                 else
13585                         goto nomatch;
13586         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13587                 strcpy(tp->board_part_number, "BCM95906");
13588         } else {
13589 nomatch:
13590                 strcpy(tp->board_part_number, "none");
13591         }
13592 }
13593
13594 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13595 {
13596         u32 val;
13597
13598         if (tg3_nvram_read(tp, offset, &val) ||
13599             (val & 0xfc000000) != 0x0c000000 ||
13600             tg3_nvram_read(tp, offset + 4, &val) ||
13601             val != 0)
13602                 return 0;
13603
13604         return 1;
13605 }
13606
/* Read the bootcode version from NVRAM and append it to tp->fw_ver.
 *
 * Newer bootcode images embed a 16-byte ASCII version string; older
 * ones only expose major/minor fields, formatted here as "vM.mm".
 */
static void __devinit tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	/* Signature 0x0c in the top bits followed by a zero word marks
	 * the newer format with an embedded version string.
	 */
	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		/* Need 16 bytes of room for the embedded string. */
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			/* Raw byte copy: the string is stored as
			 * ASCII text, big-endian word order.
			 */
			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
13658
13659 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13660 {
13661         u32 val, major, minor;
13662
13663         /* Use native endian representation */
13664         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13665                 return;
13666
13667         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13668                 TG3_NVM_HWSB_CFG1_MAJSFT;
13669         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13670                 TG3_NVM_HWSB_CFG1_MINSFT;
13671
13672         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13673 }
13674
/* Decode a self-boot format-1 image's version (plus optional build
 * letter) from NVRAM and append it to tp->fw_ver as "sb vM.mm[x]".
 * @val is the signature word already read from NVRAM offset 0.
 */
static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	/* The version (edh) word lives at a revision-specific offset. */
	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		/* Unknown revision: leave just "sb". */
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;

	/* Sanity limits: two-digit minor, build letters 'a'..'z'. */
	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	/* Non-zero build numbers append as a single letter suffix. */
	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
13729
13730 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13731 {
13732         u32 val, offset, start;
13733         int i, vlen;
13734
13735         for (offset = TG3_NVM_DIR_START;
13736              offset < TG3_NVM_DIR_END;
13737              offset += TG3_NVM_DIRENT_SIZE) {
13738                 if (tg3_nvram_read(tp, offset, &val))
13739                         return;
13740
13741                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13742                         break;
13743         }
13744
13745         if (offset == TG3_NVM_DIR_END)
13746                 return;
13747
13748         if (!tg3_flag(tp, 5705_PLUS))
13749                 start = 0x08000000;
13750         else if (tg3_nvram_read(tp, offset - 4, &start))
13751                 return;
13752
13753         if (tg3_nvram_read(tp, offset + 4, &offset) ||
13754             !tg3_fw_img_is_valid(tp, offset) ||
13755             tg3_nvram_read(tp, offset + 8, &val))
13756                 return;
13757
13758         offset += val - start;
13759
13760         vlen = strlen(tp->fw_ver);
13761
13762         tp->fw_ver[vlen++] = ',';
13763         tp->fw_ver[vlen++] = ' ';
13764
13765         for (i = 0; i < 4; i++) {
13766                 __be32 v;
13767                 if (tg3_nvram_read_be32(tp, offset, &v))
13768                         return;
13769
13770                 offset += sizeof(v);
13771
13772                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13773                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13774                         break;
13775                 }
13776
13777                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13778                 vlen += sizeof(v);
13779         }
13780 }
13781
13782 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13783 {
13784         int vlen;
13785         u32 apedata;
13786         char *fwtype;
13787
13788         if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13789                 return;
13790
13791         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13792         if (apedata != APE_SEG_SIG_MAGIC)
13793                 return;
13794
13795         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13796         if (!(apedata & APE_FW_STATUS_READY))
13797                 return;
13798
13799         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13800
13801         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13802                 tg3_flag_set(tp, APE_HAS_NCSI);
13803                 fwtype = "NCSI";
13804         } else {
13805                 fwtype = "DASH";
13806         }
13807
13808         vlen = strlen(tp->fw_ver);
13809
13810         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13811                  fwtype,
13812                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13813                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13814                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13815                  (apedata & APE_FW_VERSION_BLDMSK));
13816 }
13817
13818 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13819 {
13820         u32 val;
13821         bool vpd_vers = false;
13822
13823         if (tp->fw_ver[0] != 0)
13824                 vpd_vers = true;
13825
13826         if (tg3_flag(tp, NO_NVRAM)) {
13827                 strcat(tp->fw_ver, "sb");
13828                 return;
13829         }
13830
13831         if (tg3_nvram_read(tp, 0, &val))
13832                 return;
13833
13834         if (val == TG3_EEPROM_MAGIC)
13835                 tg3_read_bc_ver(tp);
13836         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13837                 tg3_read_sb_ver(tp, val);
13838         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13839                 tg3_read_hwsb_ver(tp);
13840         else
13841                 return;
13842
13843         if (vpd_vers)
13844                 goto done;
13845
13846         if (tg3_flag(tp, ENABLE_APE)) {
13847                 if (tg3_flag(tp, ENABLE_ASF))
13848                         tg3_read_dash_ver(tp);
13849         } else if (tg3_flag(tp, ENABLE_ASF)) {
13850                 tg3_read_mgmtfw_ver(tp);
13851         }
13852
13853 done:
13854         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13855 }
13856
13857 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13858 {
13859         if (tg3_flag(tp, LRG_PROD_RING_CAP))
13860                 return TG3_RX_RET_MAX_SIZE_5717;
13861         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13862                 return TG3_RX_RET_MAX_SIZE_5700;
13863         else
13864                 return TG3_RX_RET_MAX_SIZE_5705;
13865 }
13866
/* Host chipsets matched against this table elsewhere in the driver;
 * the name suggests these reorder writes and require the driver's
 * write-reorder handling -- confirm at the lookup site.
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },	/* terminator */
};
13873
/* Find the other PCI function of a dual-port device.
 *
 * Scans all eight functions at this device's slot for a pci_dev other
 * than tp->pdev; in single-port configurations tp->pdev itself is
 * returned.  The returned pointer deliberately carries no extra
 * reference (see comment below).
 */
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);	/* pci_dev_put(NULL) is a no-op */
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	/* NOTE(review): if the loop completes without break while the
	 * final pci_get_slot() returned tp->pdev (this device at
	 * function 7), that reference was already dropped inside the
	 * loop and the pci_dev_put() above drops one extra -- confirm
	 * whether that configuration can occur.
	 */
	return peer;
}
13901
13902 static int __devinit tg3_get_invariants(struct tg3 *tp)
13903 {
13904         u32 misc_ctrl_reg;
13905         u32 pci_state_reg, grc_misc_cfg;
13906         u32 val;
13907         u16 pci_cmd;
13908         int err;
13909
13910         /* Force memory write invalidate off.  If we leave it on,
13911          * then on 5700_BX chips we have to enable a workaround.
13912          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13913          * to match the cacheline size.  The Broadcom driver have this
13914          * workaround but turns MWI off all the times so never uses
13915          * it.  This seems to suggest that the workaround is insufficient.
13916          */
13917         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13918         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13919         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13920
13921         /* Important! -- Make sure register accesses are byteswapped
13922          * correctly.  Also, for those chips that require it, make
13923          * sure that indirect register accesses are enabled before
13924          * the first operation.
13925          */
13926         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13927                               &misc_ctrl_reg);
13928         tp->misc_host_ctrl |= (misc_ctrl_reg &
13929                                MISC_HOST_CTRL_CHIPREV);
13930         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13931                                tp->misc_host_ctrl);
13932
13933         tp->pci_chip_rev_id = (misc_ctrl_reg >>
13934                                MISC_HOST_CTRL_CHIPREV_SHIFT);
13935         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13936                 u32 prod_id_asic_rev;
13937
13938                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13939                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13940                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13941                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13942                         pci_read_config_dword(tp->pdev,
13943                                               TG3PCI_GEN2_PRODID_ASICREV,
13944                                               &prod_id_asic_rev);
13945                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13946                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13947                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13948                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13949                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13950                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
13951                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
13952                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
13953                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
13954                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13955                         pci_read_config_dword(tp->pdev,
13956                                               TG3PCI_GEN15_PRODID_ASICREV,
13957                                               &prod_id_asic_rev);
13958                 else
13959                         pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13960                                               &prod_id_asic_rev);
13961
13962                 tp->pci_chip_rev_id = prod_id_asic_rev;
13963         }
13964
13965         /* Wrong chip ID in 5752 A0. This code can be removed later
13966          * as A0 is not in production.
13967          */
13968         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13969                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13970
13971         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13972          * we need to disable memory and use config. cycles
13973          * only to access all registers. The 5702/03 chips
13974          * can mistakenly decode the special cycles from the
13975          * ICH chipsets as memory write cycles, causing corruption
13976          * of register and memory space. Only certain ICH bridges
13977          * will drive special cycles with non-zero data during the
13978          * address phase which can fall within the 5703's address
13979          * range. This is not an ICH bug as the PCI spec allows
13980          * non-zero address during special cycles. However, only
13981          * these ICH bridges are known to drive non-zero addresses
13982          * during special cycles.
13983          *
13984          * Since special cycles do not cross PCI bridges, we only
13985          * enable this workaround if the 5703 is on the secondary
13986          * bus of these ICH bridges.
13987          */
13988         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13989             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13990                 static struct tg3_dev_id {
13991                         u32     vendor;
13992                         u32     device;
13993                         u32     rev;
13994                 } ich_chipsets[] = {
13995                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13996                           PCI_ANY_ID },
13997                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13998                           PCI_ANY_ID },
13999                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
14000                           0xa },
14001                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
14002                           PCI_ANY_ID },
14003                         { },
14004                 };
14005                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
14006                 struct pci_dev *bridge = NULL;
14007
14008                 while (pci_id->vendor != 0) {
14009                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
14010                                                 bridge);
14011                         if (!bridge) {
14012                                 pci_id++;
14013                                 continue;
14014                         }
14015                         if (pci_id->rev != PCI_ANY_ID) {
14016                                 if (bridge->revision > pci_id->rev)
14017                                         continue;
14018                         }
14019                         if (bridge->subordinate &&
14020                             (bridge->subordinate->number ==
14021                              tp->pdev->bus->number)) {
14022                                 tg3_flag_set(tp, ICH_WORKAROUND);
14023                                 pci_dev_put(bridge);
14024                                 break;
14025                         }
14026                 }
14027         }
14028
14029         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14030                 static struct tg3_dev_id {
14031                         u32     vendor;
14032                         u32     device;
14033                 } bridge_chipsets[] = {
14034                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
14035                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
14036                         { },
14037                 };
14038                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14039                 struct pci_dev *bridge = NULL;
14040
14041                 while (pci_id->vendor != 0) {
14042                         bridge = pci_get_device(pci_id->vendor,
14043                                                 pci_id->device,
14044                                                 bridge);
14045                         if (!bridge) {
14046                                 pci_id++;
14047                                 continue;
14048                         }
14049                         if (bridge->subordinate &&
14050                             (bridge->subordinate->number <=
14051                              tp->pdev->bus->number) &&
14052                             (bridge->subordinate->subordinate >=
14053                              tp->pdev->bus->number)) {
14054                                 tg3_flag_set(tp, 5701_DMA_BUG);
14055                                 pci_dev_put(bridge);
14056                                 break;
14057                         }
14058                 }
14059         }
14060
14061         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
14062          * DMA addresses > 40-bit. This bridge may have other additional
14063          * 57xx devices behind it in some 4-port NIC designs for example.
14064          * Any tg3 device found behind the bridge will also need the 40-bit
14065          * DMA workaround.
14066          */
14067         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14068             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14069                 tg3_flag_set(tp, 5780_CLASS);
14070                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14071                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
14072         } else {
14073                 struct pci_dev *bridge = NULL;
14074
14075                 do {
14076                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14077                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
14078                                                 bridge);
14079                         if (bridge && bridge->subordinate &&
14080                             (bridge->subordinate->number <=
14081                              tp->pdev->bus->number) &&
14082                             (bridge->subordinate->subordinate >=
14083                              tp->pdev->bus->number)) {
14084                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14085                                 pci_dev_put(bridge);
14086                                 break;
14087                         }
14088                 } while (bridge);
14089         }
14090
14091         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14092             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14093                 tp->pdev_peer = tg3_find_peer(tp);
14094
14095         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14096             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14097             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14098                 tg3_flag_set(tp, 5717_PLUS);
14099
14100         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
14101             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14102                 tg3_flag_set(tp, 57765_CLASS);
14103
14104         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
14105                 tg3_flag_set(tp, 57765_PLUS);
14106
14107         /* Intentionally exclude ASIC_REV_5906 */
14108         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14109             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14110             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14111             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14112             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14113             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14114             tg3_flag(tp, 57765_PLUS))
14115                 tg3_flag_set(tp, 5755_PLUS);
14116
14117         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14118             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14119             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14120             tg3_flag(tp, 5755_PLUS) ||
14121             tg3_flag(tp, 5780_CLASS))
14122                 tg3_flag_set(tp, 5750_PLUS);
14123
14124         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14125             tg3_flag(tp, 5750_PLUS))
14126                 tg3_flag_set(tp, 5705_PLUS);
14127
14128         /* Determine TSO capabilities */
14129         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
14130                 ; /* Do nothing. HW bug. */
14131         else if (tg3_flag(tp, 57765_PLUS))
14132                 tg3_flag_set(tp, HW_TSO_3);
14133         else if (tg3_flag(tp, 5755_PLUS) ||
14134                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14135                 tg3_flag_set(tp, HW_TSO_2);
14136         else if (tg3_flag(tp, 5750_PLUS)) {
14137                 tg3_flag_set(tp, HW_TSO_1);
14138                 tg3_flag_set(tp, TSO_BUG);
14139                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14140                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
14141                         tg3_flag_clear(tp, TSO_BUG);
14142         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14143                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14144                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
14145                         tg3_flag_set(tp, TSO_BUG);
14146                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14147                         tp->fw_needed = FIRMWARE_TG3TSO5;
14148                 else
14149                         tp->fw_needed = FIRMWARE_TG3TSO;
14150         }
14151
14152         /* Selectively allow TSO based on operating conditions */
14153         if (tg3_flag(tp, HW_TSO_1) ||
14154             tg3_flag(tp, HW_TSO_2) ||
14155             tg3_flag(tp, HW_TSO_3) ||
14156             tp->fw_needed) {
14157                 /* For firmware TSO, assume ASF is disabled.
14158                  * We'll disable TSO later if we discover ASF
14159                  * is enabled in tg3_get_eeprom_hw_cfg().
14160                  */
14161                 tg3_flag_set(tp, TSO_CAPABLE);
14162         } else {
14163                 tg3_flag_clear(tp, TSO_CAPABLE);
14164                 tg3_flag_clear(tp, TSO_BUG);
14165                 tp->fw_needed = NULL;
14166         }
14167
14168         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14169                 tp->fw_needed = FIRMWARE_TG3;
14170
14171         tp->irq_max = 1;
14172
14173         if (tg3_flag(tp, 5750_PLUS)) {
14174                 tg3_flag_set(tp, SUPPORT_MSI);
14175                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14176                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14177                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14178                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14179                      tp->pdev_peer == tp->pdev))
14180                         tg3_flag_clear(tp, SUPPORT_MSI);
14181
14182                 if (tg3_flag(tp, 5755_PLUS) ||
14183                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14184                         tg3_flag_set(tp, 1SHOT_MSI);
14185                 }
14186
14187                 if (tg3_flag(tp, 57765_PLUS)) {
14188                         tg3_flag_set(tp, SUPPORT_MSIX);
14189                         tp->irq_max = TG3_IRQ_MAX_VECS;
14190                         tg3_rss_init_dflt_indir_tbl(tp);
14191                 }
14192         }
14193
14194         if (tg3_flag(tp, 5755_PLUS))
14195                 tg3_flag_set(tp, SHORT_DMA_BUG);
14196
14197         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14198                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
14199
14200         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14201             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14202             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14203                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14204
14205         if (tg3_flag(tp, 57765_PLUS) &&
14206             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14207                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14208
14209         if (!tg3_flag(tp, 5705_PLUS) ||
14210             tg3_flag(tp, 5780_CLASS) ||
14211             tg3_flag(tp, USE_JUMBO_BDFLAG))
14212                 tg3_flag_set(tp, JUMBO_CAPABLE);
14213
14214         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14215                               &pci_state_reg);
14216
14217         if (pci_is_pcie(tp->pdev)) {
14218                 u16 lnkctl;
14219
14220                 tg3_flag_set(tp, PCI_EXPRESS);
14221
14222                 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
14223                         int readrq = pcie_get_readrq(tp->pdev);
14224                         if (readrq > 2048)
14225                                 pcie_set_readrq(tp->pdev, 2048);
14226                 }
14227
14228                 pci_read_config_word(tp->pdev,
14229                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14230                                      &lnkctl);
14231                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14232                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14233                             ASIC_REV_5906) {
14234                                 tg3_flag_clear(tp, HW_TSO_2);
14235                                 tg3_flag_clear(tp, TSO_CAPABLE);
14236                         }
14237                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14238                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14239                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14240                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14241                                 tg3_flag_set(tp, CLKREQ_BUG);
14242                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14243                         tg3_flag_set(tp, L1PLLPD_EN);
14244                 }
14245         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14246                 /* BCM5785 devices are effectively PCIe devices, and should
14247                  * follow PCIe codepaths, but do not have a PCIe capabilities
14248                  * section.
14249                  */
14250                 tg3_flag_set(tp, PCI_EXPRESS);
14251         } else if (!tg3_flag(tp, 5705_PLUS) ||
14252                    tg3_flag(tp, 5780_CLASS)) {
14253                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14254                 if (!tp->pcix_cap) {
14255                         dev_err(&tp->pdev->dev,
14256                                 "Cannot find PCI-X capability, aborting\n");
14257                         return -EIO;
14258                 }
14259
14260                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14261                         tg3_flag_set(tp, PCIX_MODE);
14262         }
14263
14264         /* If we have an AMD 762 or VIA K8T800 chipset, write
14265          * reordering to the mailbox registers done by the host
14266          * controller can cause major troubles.  We read back from
14267          * every mailbox register write to force the writes to be
14268          * posted to the chip in order.
14269          */
14270         if (pci_dev_present(tg3_write_reorder_chipsets) &&
14271             !tg3_flag(tp, PCI_EXPRESS))
14272                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14273
14274         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14275                              &tp->pci_cacheline_sz);
14276         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14277                              &tp->pci_lat_timer);
14278         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14279             tp->pci_lat_timer < 64) {
14280                 tp->pci_lat_timer = 64;
14281                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14282                                       tp->pci_lat_timer);
14283         }
14284
14285         /* Important! -- It is critical that the PCI-X hw workaround
14286          * situation is decided before the first MMIO register access.
14287          */
14288         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14289                 /* 5700 BX chips need to have their TX producer index
14290                  * mailboxes written twice to workaround a bug.
14291                  */
14292                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14293
14294                 /* If we are in PCI-X mode, enable register write workaround.
14295                  *
14296                  * The workaround is to use indirect register accesses
14297                  * for all chip writes not to mailbox registers.
14298                  */
14299                 if (tg3_flag(tp, PCIX_MODE)) {
14300                         u32 pm_reg;
14301
14302                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14303
14304                         /* The chip can have its power management PCI config
14305                          * space registers clobbered due to this bug.
14306                          * So explicitly force the chip into D0 here.
14307                          */
14308                         pci_read_config_dword(tp->pdev,
14309                                               tp->pm_cap + PCI_PM_CTRL,
14310                                               &pm_reg);
14311                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14312                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14313                         pci_write_config_dword(tp->pdev,
14314                                                tp->pm_cap + PCI_PM_CTRL,
14315                                                pm_reg);
14316
14317                         /* Also, force SERR#/PERR# in PCI command. */
14318                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14319                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14320                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14321                 }
14322         }
14323
14324         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14325                 tg3_flag_set(tp, PCI_HIGH_SPEED);
14326         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14327                 tg3_flag_set(tp, PCI_32BIT);
14328
14329         /* Chip-specific fixup from Broadcom driver */
14330         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14331             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14332                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14333                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14334         }
14335
14336         /* Default fast path register access methods */
14337         tp->read32 = tg3_read32;
14338         tp->write32 = tg3_write32;
14339         tp->read32_mbox = tg3_read32;
14340         tp->write32_mbox = tg3_write32;
14341         tp->write32_tx_mbox = tg3_write32;
14342         tp->write32_rx_mbox = tg3_write32;
14343
14344         /* Various workaround register access methods */
14345         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14346                 tp->write32 = tg3_write_indirect_reg32;
14347         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14348                  (tg3_flag(tp, PCI_EXPRESS) &&
14349                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14350                 /*
14351                  * Back to back register writes can cause problems on these
14352                  * chips, the workaround is to read back all reg writes
14353                  * except those to mailbox regs.
14354                  *
14355                  * See tg3_write_indirect_reg32().
14356                  */
14357                 tp->write32 = tg3_write_flush_reg32;
14358         }
14359
14360         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14361                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14362                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14363                         tp->write32_rx_mbox = tg3_write_flush_reg32;
14364         }
14365
14366         if (tg3_flag(tp, ICH_WORKAROUND)) {
14367                 tp->read32 = tg3_read_indirect_reg32;
14368                 tp->write32 = tg3_write_indirect_reg32;
14369                 tp->read32_mbox = tg3_read_indirect_mbox;
14370                 tp->write32_mbox = tg3_write_indirect_mbox;
14371                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14372                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14373
14374                 iounmap(tp->regs);
14375                 tp->regs = NULL;
14376
14377                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14378                 pci_cmd &= ~PCI_COMMAND_MEMORY;
14379                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14380         }
14381         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14382                 tp->read32_mbox = tg3_read32_mbox_5906;
14383                 tp->write32_mbox = tg3_write32_mbox_5906;
14384                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14385                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14386         }
14387
14388         if (tp->write32 == tg3_write_indirect_reg32 ||
14389             (tg3_flag(tp, PCIX_MODE) &&
14390              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14391               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14392                 tg3_flag_set(tp, SRAM_USE_CONFIG);
14393
14394         /* The memory arbiter has to be enabled in order for SRAM accesses
14395          * to succeed.  Normally on powerup the tg3 chip firmware will make
14396          * sure it is enabled, but other entities such as system netboot
14397          * code might disable it.
14398          */
14399         val = tr32(MEMARB_MODE);
14400         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14401
14402         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14403         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14404             tg3_flag(tp, 5780_CLASS)) {
14405                 if (tg3_flag(tp, PCIX_MODE)) {
14406                         pci_read_config_dword(tp->pdev,
14407                                               tp->pcix_cap + PCI_X_STATUS,
14408                                               &val);
14409                         tp->pci_fn = val & 0x7;
14410                 }
14411         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14412                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14413                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14414                     NIC_SRAM_CPMUSTAT_SIG) {
14415                         tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14416                         tp->pci_fn = tp->pci_fn ? 1 : 0;
14417                 }
14418         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14419                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14420                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14421                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14422                     NIC_SRAM_CPMUSTAT_SIG) {
14423                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14424                                      TG3_CPMU_STATUS_FSHFT_5719;
14425                 }
14426         }
14427
14428         /* Get eeprom hw config before calling tg3_set_power_state().
14429          * In particular, the TG3_FLAG_IS_NIC flag must be
14430          * determined before calling tg3_set_power_state() so that
14431          * we know whether or not to switch out of Vaux power.
14432          * When the flag is set, it means that GPIO1 is used for eeprom
14433          * write protect and also implies that it is a LOM where GPIOs
14434          * are not used to switch power.
14435          */
14436         tg3_get_eeprom_hw_cfg(tp);
14437
14438         if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14439                 tg3_flag_clear(tp, TSO_CAPABLE);
14440                 tg3_flag_clear(tp, TSO_BUG);
14441                 tp->fw_needed = NULL;
14442         }
14443
14444         if (tg3_flag(tp, ENABLE_APE)) {
14445                 /* Allow reads and writes to the
14446                  * APE register and memory space.
14447                  */
14448                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14449                                  PCISTATE_ALLOW_APE_SHMEM_WR |
14450                                  PCISTATE_ALLOW_APE_PSPACE_WR;
14451                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14452                                        pci_state_reg);
14453
14454                 tg3_ape_lock_init(tp);
14455         }
14456
14457         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14458             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14459             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14460             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14461             tg3_flag(tp, 57765_PLUS))
14462                 tg3_flag_set(tp, CPMU_PRESENT);
14463
14464         /* Set up tp->grc_local_ctrl before calling
14465          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14466          * will bring 5700's external PHY out of reset.
14467          * It is also used as eeprom write protect on LOMs.
14468          */
14469         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14470         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14471             tg3_flag(tp, EEPROM_WRITE_PROT))
14472                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14473                                        GRC_LCLCTRL_GPIO_OUTPUT1);
14474         /* Unused GPIO3 must be driven as output on 5752 because there
14475          * are no pull-up resistors on unused GPIO pins.
14476          */
14477         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14478                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14479
14480         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14481             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14482             tg3_flag(tp, 57765_CLASS))
14483                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14484
14485         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14486             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14487                 /* Turn off the debug UART. */
14488                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14489                 if (tg3_flag(tp, IS_NIC))
14490                         /* Keep VMain power. */
14491                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14492                                               GRC_LCLCTRL_GPIO_OUTPUT0;
14493         }
14494
14495         /* Switch out of Vaux if it is a NIC */
14496         tg3_pwrsrc_switch_to_vmain(tp);
14497
14498         /* Derive initial jumbo mode from MTU assigned in
14499          * ether_setup() via the alloc_etherdev() call
14500          */
14501         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14502                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14503
14504         /* Determine WakeOnLan speed to use. */
14505         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14506             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14507             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14508             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14509                 tg3_flag_clear(tp, WOL_SPEED_100MB);
14510         } else {
14511                 tg3_flag_set(tp, WOL_SPEED_100MB);
14512         }
14513
14514         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14515                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14516
14517         /* A few boards don't want Ethernet@WireSpeed phy feature */
14518         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14519             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14520              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14521              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14522             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14523             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14524                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14525
14526         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14527             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14528                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14529         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14530                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14531
14532         if (tg3_flag(tp, 5705_PLUS) &&
14533             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14534             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14535             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14536             !tg3_flag(tp, 57765_PLUS)) {
14537                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14538                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14539                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14540                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14541                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14542                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14543                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14544                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14545                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14546                 } else
14547                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14548         }
14549
14550         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14551             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14552                 tp->phy_otp = tg3_read_otp_phycfg(tp);
14553                 if (tp->phy_otp == 0)
14554                         tp->phy_otp = TG3_OTP_DEFAULT;
14555         }
14556
14557         if (tg3_flag(tp, CPMU_PRESENT))
14558                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14559         else
14560                 tp->mi_mode = MAC_MI_MODE_BASE;
14561
14562         tp->coalesce_mode = 0;
14563         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14564             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14565                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14566
14567         /* Set these bits to enable statistics workaround. */
14568         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14569             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14570             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14571                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14572                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14573         }
14574
14575         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14576             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14577                 tg3_flag_set(tp, USE_PHYLIB);
14578
14579         err = tg3_mdio_init(tp);
14580         if (err)
14581                 return err;
14582
14583         /* Initialize data/descriptor byte/word swapping. */
14584         val = tr32(GRC_MODE);
14585         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14586                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14587                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
14588                         GRC_MODE_B2HRX_ENABLE |
14589                         GRC_MODE_HTX2B_ENABLE |
14590                         GRC_MODE_HOST_STACKUP);
14591         else
14592                 val &= GRC_MODE_HOST_STACKUP;
14593
14594         tw32(GRC_MODE, val | tp->grc_mode);
14595
14596         tg3_switch_clocks(tp);
14597
14598         /* Clear this out for sanity. */
14599         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14600
14601         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14602                               &pci_state_reg);
14603         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14604             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14605                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14606
14607                 if (chiprevid == CHIPREV_ID_5701_A0 ||
14608                     chiprevid == CHIPREV_ID_5701_B0 ||
14609                     chiprevid == CHIPREV_ID_5701_B2 ||
14610                     chiprevid == CHIPREV_ID_5701_B5) {
14611                         void __iomem *sram_base;
14612
14613                         /* Write some dummy words into the SRAM status block
14614                          * area, see if it reads back correctly.  If the return
14615                          * value is bad, force enable the PCIX workaround.
14616                          */
14617                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14618
14619                         writel(0x00000000, sram_base);
14620                         writel(0x00000000, sram_base + 4);
14621                         writel(0xffffffff, sram_base + 4);
14622                         if (readl(sram_base) != 0x00000000)
14623                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14624                 }
14625         }
14626
14627         udelay(50);
14628         tg3_nvram_init(tp);
14629
14630         grc_misc_cfg = tr32(GRC_MISC_CFG);
14631         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14632
14633         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14634             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14635              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14636                 tg3_flag_set(tp, IS_5788);
14637
14638         if (!tg3_flag(tp, IS_5788) &&
14639             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14640                 tg3_flag_set(tp, TAGGED_STATUS);
14641         if (tg3_flag(tp, TAGGED_STATUS)) {
14642                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14643                                       HOSTCC_MODE_CLRTICK_TXBD);
14644
14645                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14646                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14647                                        tp->misc_host_ctrl);
14648         }
14649
14650         /* Preserve the APE MAC_MODE bits */
14651         if (tg3_flag(tp, ENABLE_APE))
14652                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14653         else
14654                 tp->mac_mode = 0;
14655
14656         /* these are limited to 10/100 only */
14657         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14658              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14659             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14660              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14661              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14662               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14663               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14664             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14665              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14666               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14667               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14668             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14669             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14670             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14671             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14672                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14673
14674         err = tg3_phy_probe(tp);
14675         if (err) {
14676                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14677                 /* ... but do not return immediately ... */
14678                 tg3_mdio_fini(tp);
14679         }
14680
14681         tg3_read_vpd(tp);
14682         tg3_read_fw_ver(tp);
14683
14684         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14685                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14686         } else {
14687                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14688                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14689                 else
14690                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14691         }
14692
14693         /* 5700 {AX,BX} chips have a broken status block link
14694          * change bit implementation, so we must use the
14695          * status register in those cases.
14696          */
14697         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14698                 tg3_flag_set(tp, USE_LINKCHG_REG);
14699         else
14700                 tg3_flag_clear(tp, USE_LINKCHG_REG);
14701
14702         /* The led_ctrl is set during tg3_phy_probe, here we might
14703          * have to force the link status polling mechanism based
14704          * upon subsystem IDs.
14705          */
14706         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14707             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14708             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14709                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14710                 tg3_flag_set(tp, USE_LINKCHG_REG);
14711         }
14712
14713         /* For all SERDES we poll the MAC status register. */
14714         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14715                 tg3_flag_set(tp, POLL_SERDES);
14716         else
14717                 tg3_flag_clear(tp, POLL_SERDES);
14718
14719         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
14720         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14721         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14722             tg3_flag(tp, PCIX_MODE)) {
14723                 tp->rx_offset = NET_SKB_PAD;
14724 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14725                 tp->rx_copy_thresh = ~(u16)0;
14726 #endif
14727         }
14728
14729         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14730         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14731         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14732
14733         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14734
14735         /* Increment the rx prod index on the rx std ring by at most
14736          * 8 for these chips to workaround hw errata.
14737          */
14738         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14739             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14740             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14741                 tp->rx_std_max_post = 8;
14742
14743         if (tg3_flag(tp, ASPM_WORKAROUND))
14744                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14745                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
14746
14747         return err;
14748 }
14749
14750 #ifdef CONFIG_SPARC
14751 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14752 {
14753         struct net_device *dev = tp->dev;
14754         struct pci_dev *pdev = tp->pdev;
14755         struct device_node *dp = pci_device_to_OF_node(pdev);
14756         const unsigned char *addr;
14757         int len;
14758
14759         addr = of_get_property(dp, "local-mac-address", &len);
14760         if (addr && len == 6) {
14761                 memcpy(dev->dev_addr, addr, 6);
14762                 memcpy(dev->perm_addr, dev->dev_addr, 6);
14763                 return 0;
14764         }
14765         return -ENODEV;
14766 }
14767
14768 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14769 {
14770         struct net_device *dev = tp->dev;
14771
14772         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14773         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14774         return 0;
14775 }
14776 #endif
14777
/* Determine the device MAC address, trying sources in decreasing order
 * of preference: OF property (SPARC), the bootcode mailbox in NIC SRAM,
 * NVRAM, the MAC_ADDR registers, and finally the SPARC IDPROM.
 * Returns 0 on success, -EINVAL if no valid address was found.
 */
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	/* Default NVRAM offset of the MAC address; adjusted below for
	 * multi-function/multi-port devices and the 5906.
	 */
	mac_offset = 0x7c;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		/* Second MAC of a dual-MAC device lives at offset 0xcc. */
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	/* 0x484b (ASCII "HK") in the upper half is the bootcode's
	 * signature marking a populated mailbox.
	 */
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			/* Words are big-endian: the address occupies the
			 * last 2 bytes of 'hi' and all 4 bytes of 'lo'.
			 */
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
14853
14854 #define BOUNDARY_SINGLE_CACHELINE       1
14855 #define BOUNDARY_MULTI_CACHELINE        2
14856
/* Compute the DMA read/write boundary bits to merge into the
 * TG3PCI_DMA_RW_CTRL value 'val', based on the host PCI cache line
 * size and bus type (PCI / PCI-X / PCI-E).  Returns the adjusted
 * register value.  On chips newer than 5700/5701 (and non-PCIE),
 * the boundary bits have no effect and 'val' is returned unchanged.
 */
static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	/* PCI_CACHE_LINE_SIZE config byte is in units of 4 bytes;
	 * 0 means unknown, so assume the worst case.
	 */
	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

	/* Select the boundary policy per architecture; 0 means no
	 * boundary restriction is needed on this host.
	 */
#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	/* 57765+ only has a single cache-alignment disable bit. */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		/* PCI-E only offers write-side boundary control. */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		/* Conventional PCI: clamp bursts to the cache line size
		 * when BOUNDARY_SINGLE_CACHELINE was requested; otherwise
		 * fall through to the next larger supported boundary.
		 */
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
14997
/* Perform one host<->NIC DMA transfer of 'size' bytes for the DMA
 * self-test.  An internal DMA descriptor pointing at buf_dma is written
 * into NIC SRAM and handed to the read DMA engine (to_device != 0,
 * host memory -> NIC) or the write DMA engine (to_device == 0).
 * Returns 0 when the descriptor shows up in the completion FIFO,
 * -ENODEV on timeout (40 polls at 100us intervals).
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Clear the completion FIFOs and DMA status, and quiesce the
	 * buffer manager / FTQ before kicking off the test transfer.
	 */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Descriptor: host buffer address, NIC-local buffer at 0x2100. */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM one word at a time through
	 * the PCI config-space memory window.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Queue the descriptor to the appropriate DMA engine's FTQ. */
	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	/* Poll for our descriptor address to appear in the completion
	 * FIFO, which signals the transfer finished.
	 */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
15077
15078 #define TEST_BUFFER_SIZE        0x2000
15079
/* Host bridges known to expose the 5700/5701 write-DMA problem even
 * when the loopback test in tg3_test_dma() passes; force the 16-byte
 * write boundary workaround when one of these is present.
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};
15084
/* Tune tp->dma_rwctrl (TG3PCI_DMA_RW_CTRL) for this chip and bus type,
 * then, on 5700/5701 only, run a write/read DMA loopback test against a
 * coherent buffer to detect the write-DMA erratum.  If corruption is
 * seen, the write boundary is tightened to 16 bytes and the test is
 * retried.  Returns 0 on success or a negative errno.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Base PCI command codes for DMA reads/writes. */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	/* Watermark settings depend on the bus interface type. */
	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	/* Clear the low nibble (boundary bits) on 5703/5704, where those
	 * bits are repurposed.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants.  */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 need the loopback verification below. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	/* Loop: runs a second time with the 16-byte write boundary if the
	 * first pass detects corruption; otherwise exits after one pass.
	 */
	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				/* First failure: apply the 16-byte write
				 * boundary workaround and retry the test.
				 */
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				/* Still corrupt even with the workaround. */
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
15274
/* Choose the buffer manager watermark defaults for this chip generation.
 *
 * The mbuf read-DMA/MAC-RX low-water and high-water values control when
 * the on-chip buffer manager begins throttling; correct defaults differ
 * between the 57765+, 5705+ and original 570x families.  Standard and
 * jumbo-frame watermark sets are programmed separately; the values are
 * consumed later when the hardware is (re)initialized.
 */
static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
	if (tg3_flag(tp, 57765_PLUS)) {
		/* 57765-class parts: 5705-style read-DMA watermark with
		 * 57765-specific MAC-RX and high-water values.
		 */
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_57765;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_57765;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
	} else if (tg3_flag(tp, 5705_PLUS)) {
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER_5705;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER_5705;
		/* 5906 overrides two of the 5705 defaults. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
			tp->bufmgr_config.mbuf_mac_rx_low_water =
				DEFAULT_MB_MACRX_LOW_WATER_5906;
			tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
		/* Original 570x family defaults. */
		tp->bufmgr_config.mbuf_read_dma_low_water =
			DEFAULT_MB_RDMA_LOW_WATER;
		tp->bufmgr_config.mbuf_mac_rx_low_water =
			DEFAULT_MB_MACRX_LOW_WATER;
		tp->bufmgr_config.mbuf_high_water =
			DEFAULT_MB_HIGH_WATER;

		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
		tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}

	/* DMA descriptor watermarks are the same for all generations. */
	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
15330
15331 static char * __devinit tg3_phy_string(struct tg3 *tp)
15332 {
15333         switch (tp->phy_id & TG3_PHY_ID_MASK) {
15334         case TG3_PHY_ID_BCM5400:        return "5400";
15335         case TG3_PHY_ID_BCM5401:        return "5401";
15336         case TG3_PHY_ID_BCM5411:        return "5411";
15337         case TG3_PHY_ID_BCM5701:        return "5701";
15338         case TG3_PHY_ID_BCM5703:        return "5703";
15339         case TG3_PHY_ID_BCM5704:        return "5704";
15340         case TG3_PHY_ID_BCM5705:        return "5705";
15341         case TG3_PHY_ID_BCM5750:        return "5750";
15342         case TG3_PHY_ID_BCM5752:        return "5752";
15343         case TG3_PHY_ID_BCM5714:        return "5714";
15344         case TG3_PHY_ID_BCM5780:        return "5780";
15345         case TG3_PHY_ID_BCM5755:        return "5755";
15346         case TG3_PHY_ID_BCM5787:        return "5787";
15347         case TG3_PHY_ID_BCM5784:        return "5784";
15348         case TG3_PHY_ID_BCM5756:        return "5722/5756";
15349         case TG3_PHY_ID_BCM5906:        return "5906";
15350         case TG3_PHY_ID_BCM5761:        return "5761";
15351         case TG3_PHY_ID_BCM5718C:       return "5718C";
15352         case TG3_PHY_ID_BCM5718S:       return "5718S";
15353         case TG3_PHY_ID_BCM57765:       return "57765";
15354         case TG3_PHY_ID_BCM5719C:       return "5719C";
15355         case TG3_PHY_ID_BCM5720C:       return "5720C";
15356         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
15357         case 0:                 return "serdes";
15358         default:                return "unknown";
15359         }
15360 }
15361
15362 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15363 {
15364         if (tg3_flag(tp, PCI_EXPRESS)) {
15365                 strcpy(str, "PCI Express");
15366                 return str;
15367         } else if (tg3_flag(tp, PCIX_MODE)) {
15368                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15369
15370                 strcpy(str, "PCIX:");
15371
15372                 if ((clock_ctrl == 7) ||
15373                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15374                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15375                         strcat(str, "133MHz");
15376                 else if (clock_ctrl == 0)
15377                         strcat(str, "33MHz");
15378                 else if (clock_ctrl == 2)
15379                         strcat(str, "50MHz");
15380                 else if (clock_ctrl == 4)
15381                         strcat(str, "66MHz");
15382                 else if (clock_ctrl == 6)
15383                         strcat(str, "100MHz");
15384         } else {
15385                 strcpy(str, "PCI:");
15386                 if (tg3_flag(tp, PCI_HIGH_SPEED))
15387                         strcat(str, "66MHz");
15388                 else
15389                         strcat(str, "33MHz");
15390         }
15391         if (tg3_flag(tp, PCI_32BIT))
15392                 strcat(str, ":32-bit");
15393         else
15394                 strcat(str, ":64-bit");
15395         return str;
15396 }
15397
15398 static void __devinit tg3_init_coal(struct tg3 *tp)
15399 {
15400         struct ethtool_coalesce *ec = &tp->coal;
15401
15402         memset(ec, 0, sizeof(*ec));
15403         ec->cmd = ETHTOOL_GCOALESCE;
15404         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15405         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15406         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15407         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15408         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15409         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15410         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15411         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15412         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15413
15414         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15415                                  HOSTCC_MODE_CLRTICK_TXBD)) {
15416                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15417                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15418                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15419                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15420         }
15421
15422         if (tg3_flag(tp, 5705_PLUS)) {
15423                 ec->rx_coalesce_usecs_irq = 0;
15424                 ec->tx_coalesce_usecs_irq = 0;
15425                 ec->stats_block_coalesce_usecs = 0;
15426         }
15427 }
15428
/* tg3_init_one() - PCI probe routine for Tigon3 devices.
 *
 * Takes the device from its power-on/BIOS state to a registered net
 * device: enable the PCI device, map BARs, read the chip invariants,
 * pick DMA masks and offload features, run the DMA engine self-test,
 * lay out the per-vector mailbox registers and finally register the
 * netdev.  Every failure path unwinds, via the goto chain at the
 * bottom, exactly the resources acquired up to that point.
 *
 * Returns 0 on success or a negative errno.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	netdev_features_t features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability; the offset is needed later for
	 * power state manipulation, so its absence is fatal.
	 */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find Power Management capability, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	err = pci_set_power_state(pdev, PCI_D0);
	if (err) {
		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
		goto err_out_free_res;
	}

	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_power_down;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	/* Map the main register BAR. */
	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	/* Devices with an APE (Application Processing Engine) expose its
	 * register space via BAR 2; map it and mark the flag so the rest
	 * of the driver coordinates with the APE firmware.
	 */
	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	/* Detect chip revision, capabilities and workaround flags. */
	err = tg3_get_invariants(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes.  Fall back to a 32-bit mask when the
	 * wide streaming mask cannot be set.
	 */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
	 * loopback for the remaining devices.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;

	/* NOTE(review): 5705 A1 without TSO on a slow bus gets a reduced
	 * RX pending count (63) — presumably a chip-specific workaround;
	 * confirm against the errata before changing.
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	/* Assign the interrupt / rx-return / tx-producer mailbox register
	 * offsets to each NAPI vector.  Interrupt mailboxes for the first
	 * vectors are spaced 8 bytes apart, later ones 4 bytes.
	 */
	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (tg3_flag(tp, 5717_PLUS)) {
		/* Resume a low-power mode */
		tg3_frob_aux_power(tp, false);
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	/* Probe banner: part number, chip revision, bus type, MAC. */
	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tp->pci_chip_rev_id,
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	/* Snapshot config space so the AER slot_reset path can restore it. */
	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_power_down:
	pci_set_power_state(pdev, PCI_D3hot);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
15805
15806 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15807 {
15808         struct net_device *dev = pci_get_drvdata(pdev);
15809
15810         if (dev) {
15811                 struct tg3 *tp = netdev_priv(dev);
15812
15813                 if (tp->fw)
15814                         release_firmware(tp->fw);
15815
15816                 tg3_reset_task_cancel(tp);
15817
15818                 if (tg3_flag(tp, USE_PHYLIB)) {
15819                         tg3_phy_fini(tp);
15820                         tg3_mdio_fini(tp);
15821                 }
15822
15823                 unregister_netdev(dev);
15824                 if (tp->aperegs) {
15825                         iounmap(tp->aperegs);
15826                         tp->aperegs = NULL;
15827                 }
15828                 if (tp->regs) {
15829                         iounmap(tp->regs);
15830                         tp->regs = NULL;
15831                 }
15832                 free_netdev(dev);
15833                 pci_release_regions(pdev);
15834                 pci_disable_device(pdev);
15835                 pci_set_drvdata(pdev, NULL);
15836         }
15837 }
15838
15839 #ifdef CONFIG_PM_SLEEP
/* System suspend callback (dev_pm_ops).
 *
 * Quiesces a running interface: stop the PHY and the data path, kill
 * the periodic timer, halt the chip, then let tg3_power_down_prepare()
 * arrange the low-power/wake state.  If that final step fails, the
 * hardware is restarted so the device remains usable even though the
 * suspend is aborted.  Returns 0 or a negative errno.
 */
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Nothing to quiesce if the interface is down. */
	if (!netif_running(dev))
		return 0;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		/* Suspend failed: bring the hardware and data path back up
		 * rather than leaving the device halted.
		 */
		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		/* Restart the PHY only after dropping the locks. */
		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
15893
/* System resume callback (dev_pm_ops).
 *
 * Mirror of tg3_suspend(): for a running interface, reattach the
 * netdev, restart the hardware, re-arm the periodic timer and bring
 * the data path and PHY back up.  Returns 0 or a negative errno from
 * tg3_restart_hw().
 */
static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Interface was down across suspend; nothing to restore. */
	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	/* Restart the PHY only after dropping the locks. */
	if (!err)
		tg3_phy_start(tp);

	return err;
}
15926
/* Publish suspend/resume through a dev_pm_ops structure; without
 * CONFIG_PM_SLEEP the driver registers no PM operations at all.
 */
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
15935
/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.  It quiesces a running interface
 * and halts the chip so software state is consistent before the
 * recovery core decides whether to reset the slot.  Returns
 * PCI_ERS_RESULT_NEED_RESET normally, or
 * PCI_ERS_RESULT_DISCONNECT on a permanent failure.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	/* rtnl_lock serializes against ifup/ifdown and the other
	 * error-recovery callbacks.
	 */
	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	/* Want to make sure that the reset task doesn't run */
	tg3_reset_task_cancel(tp);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure)
		err = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return err;
}
15985
/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 *
 * Re-enables the device, restores the config space saved at probe
 * time, and powers the chip back up for a running interface.
 * Returns PCI_ERS_RESULT_RECOVERED on success, otherwise
 * PCI_ERS_RESULT_DISCONNECT.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	/* Restore the config space snapshot, then re-save it so a
	 * subsequent reset can restore it again.
	 */
	pci_restore_state(pdev);
	pci_save_state(pdev);

	/* A down interface needs no hardware restart. */
	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	rtnl_unlock();

	return rc;
}
16029
/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.  Restarts the
 * hardware, re-arms the periodic timer and brings the data path
 * and PHY back up for a running interface.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	tg3_full_unlock(tp);
	if (err) {
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

	/* Restart the PHY last, outside the full lock. */
	tg3_phy_start(tp);

done:
	rtnl_unlock();
}
16069
/* PCI error-recovery (AER) callbacks: quiesce on error, reinitialize
 * after slot reset, restart traffic on resume.
 */
static struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};
16075
/* PCI driver glue: supported device table, probe/remove entry points,
 * error-recovery handlers and (optional) power-management callbacks.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.err_handler	= &tg3_err_handler,
	.driver.pm	= TG3_PM_OPS,
};
16084
/* Module load: register the driver with the PCI core; per-device setup
 * then happens in tg3_init_one() as devices are matched.
 */
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}
16089
/* Module unload: unregister the driver, which triggers tg3_remove_one()
 * for every bound device.
 */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}
16094
/* Standard module entry/exit hooks. */
module_init(tg3_init);
module_exit(tg3_cleanup);