]> git.kernelconcepts.de Git - karo-tx-linux.git/blob - drivers/net/ethernet/broadcom/tg3.c
tg3: Reduce UMP event collision window
[karo-tx-linux.git] / drivers / net / ethernet / broadcom / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2011 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47
48 #include <net/checksum.h>
49 #include <net/ip.h>
50
51 #include <asm/system.h>
52 #include <linux/io.h>
53 #include <asm/byteorder.h>
54 #include <linux/uaccess.h>
55
56 #ifdef CONFIG_SPARC
57 #include <asm/idprom.h>
58 #include <asm/prom.h>
59 #endif
60
61 #define BAR_0   0
62 #define BAR_2   2
63
64 #include "tg3.h"
65
66 /* Functions & macros to verify TG3_FLAGS types */
67
/* Accessors for the tp->tg3_flags bitmap.  Each takes the flag as an
 * enum TG3_FLAGS value so that the tg3_flag{,_set,_clear}() wrapper
 * macros below get compile-time checking of the flag name.
 */
static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}
82
83 #define tg3_flag(tp, flag)                              \
84         _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
85 #define tg3_flag_set(tp, flag)                          \
86         _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
87 #define tg3_flag_clear(tp, flag)                        \
88         _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
89
90 #define DRV_MODULE_NAME         "tg3"
91 #define TG3_MAJ_NUM                     3
92 #define TG3_MIN_NUM                     122
93 #define DRV_MODULE_VERSION      \
94         __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
95 #define DRV_MODULE_RELDATE      "December 7, 2011"
96
97 #define RESET_KIND_SHUTDOWN     0
98 #define RESET_KIND_INIT         1
99 #define RESET_KIND_SUSPEND      2
100
101 #define TG3_DEF_RX_MODE         0
102 #define TG3_DEF_TX_MODE         0
103 #define TG3_DEF_MSG_ENABLE        \
104         (NETIF_MSG_DRV          | \
105          NETIF_MSG_PROBE        | \
106          NETIF_MSG_LINK         | \
107          NETIF_MSG_TIMER        | \
108          NETIF_MSG_IFDOWN       | \
109          NETIF_MSG_IFUP         | \
110          NETIF_MSG_RX_ERR       | \
111          NETIF_MSG_TX_ERR)
112
113 #define TG3_GRC_LCLCTL_PWRSW_DELAY      100
114
115 /* length of time before we decide the hardware is borked,
116  * and dev->tx_timeout() should be called to fix the problem
117  */
118
119 #define TG3_TX_TIMEOUT                  (5 * HZ)
120
121 /* hardware minimum and maximum for a single frame's data payload */
122 #define TG3_MIN_MTU                     60
123 #define TG3_MAX_MTU(tp) \
124         (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
125
126 /* These numbers seem to be hard coded in the NIC firmware somehow.
127  * You can't change the ring sizes, but you can change where you place
128  * them in the NIC onboard memory.
129  */
130 #define TG3_RX_STD_RING_SIZE(tp) \
131         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
132          TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
133 #define TG3_DEF_RX_RING_PENDING         200
134 #define TG3_RX_JMB_RING_SIZE(tp) \
135         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
136          TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
137 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
138
139 /* Do not place this n-ring entries value into the tp struct itself,
140  * we really want to expose these constants to GCC so that modulo et
141  * al.  operations are done with shifts and masks instead of with
142  * hw multiply/modulo instructions.  Another solution would be to
143  * replace things like '% foo' with '& (foo - 1)'.
144  */
145
146 #define TG3_TX_RING_SIZE                512
147 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
148
149 #define TG3_RX_STD_RING_BYTES(tp) \
150         (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
151 #define TG3_RX_JMB_RING_BYTES(tp) \
152         (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
153 #define TG3_RX_RCB_RING_BYTES(tp) \
154         (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
155 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
156                                  TG3_TX_RING_SIZE)
157 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
158
159 #define TG3_DMA_BYTE_ENAB               64
160
161 #define TG3_RX_STD_DMA_SZ               1536
162 #define TG3_RX_JMB_DMA_SZ               9046
163
164 #define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)
165
166 #define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
167 #define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
168
169 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
170         (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
171
172 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
173         (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
174
175 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
176  * that are at least dword aligned when used in PCIX mode.  The driver
177  * works around this bug by double copying the packet.  This workaround
178  * is built into the normal double copy length check for efficiency.
179  *
180  * However, the double copy is only necessary on those architectures
181  * where unaligned memory accesses are inefficient.  For those architectures
182  * where unaligned memory accesses incur little penalty, we can reintegrate
183  * the 5701 in the normal rx path.  Doing so saves a device structure
184  * dereference by hardcoding the double copy threshold in place.
185  */
186 #define TG3_RX_COPY_THRESHOLD           256
187 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
188         #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
189 #else
190         #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
191 #endif
192
193 #if (NET_IP_ALIGN != 0)
194 #define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
195 #else
196 #define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
197 #endif
198
199 /* minimum number of free TX descriptors required to wake up TX process */
200 #define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
201 #define TG3_TX_BD_DMA_MAX_2K            2048
202 #define TG3_TX_BD_DMA_MAX_4K            4096
203
204 #define TG3_RAW_IP_ALIGN 2
205
206 #define TG3_FW_UPDATE_TIMEOUT_SEC       5
207
208 #define FIRMWARE_TG3            "tigon/tg3.bin"
209 #define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
210 #define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"
211
212 static char version[] __devinitdata =
213         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
214
215 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
216 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
217 MODULE_LICENSE("GPL");
218 MODULE_VERSION(DRV_MODULE_VERSION);
219 MODULE_FIRMWARE(FIRMWARE_TG3);
220 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
221 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
222
223 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
224 module_param(tg3_debug, int, 0);
225 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
226
227 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
228         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
229         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
230         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
231         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
232         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
233         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
234         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
235         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
236         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
237         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
238         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
239         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
240         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
241         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
242         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
243         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
244         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
245         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
246         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
247         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
248         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
249         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
250         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
251         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
252         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
253         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
254         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
255         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
256         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
257         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
258         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
259         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
260         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
261         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
262         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
263         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
264         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
265         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
266         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
267         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
268         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
269         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
270         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
271         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
272         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
273         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
274         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
275         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
276         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
277         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
278         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
279         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
280         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
281         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
282         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
283         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
284         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
285         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
286         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
287         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
288         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
289         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
290         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
291         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
292         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
293         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
294         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
295         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
296         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
297         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
298         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
299         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
300         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
301         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
302         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
303         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
304         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
305         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
306         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
307         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
308         {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
309         {}
310 };
311
312 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
313
314 static const struct {
315         const char string[ETH_GSTRING_LEN];
316 } ethtool_stats_keys[] = {
317         { "rx_octets" },
318         { "rx_fragments" },
319         { "rx_ucast_packets" },
320         { "rx_mcast_packets" },
321         { "rx_bcast_packets" },
322         { "rx_fcs_errors" },
323         { "rx_align_errors" },
324         { "rx_xon_pause_rcvd" },
325         { "rx_xoff_pause_rcvd" },
326         { "rx_mac_ctrl_rcvd" },
327         { "rx_xoff_entered" },
328         { "rx_frame_too_long_errors" },
329         { "rx_jabbers" },
330         { "rx_undersize_packets" },
331         { "rx_in_length_errors" },
332         { "rx_out_length_errors" },
333         { "rx_64_or_less_octet_packets" },
334         { "rx_65_to_127_octet_packets" },
335         { "rx_128_to_255_octet_packets" },
336         { "rx_256_to_511_octet_packets" },
337         { "rx_512_to_1023_octet_packets" },
338         { "rx_1024_to_1522_octet_packets" },
339         { "rx_1523_to_2047_octet_packets" },
340         { "rx_2048_to_4095_octet_packets" },
341         { "rx_4096_to_8191_octet_packets" },
342         { "rx_8192_to_9022_octet_packets" },
343
344         { "tx_octets" },
345         { "tx_collisions" },
346
347         { "tx_xon_sent" },
348         { "tx_xoff_sent" },
349         { "tx_flow_control" },
350         { "tx_mac_errors" },
351         { "tx_single_collisions" },
352         { "tx_mult_collisions" },
353         { "tx_deferred" },
354         { "tx_excessive_collisions" },
355         { "tx_late_collisions" },
356         { "tx_collide_2times" },
357         { "tx_collide_3times" },
358         { "tx_collide_4times" },
359         { "tx_collide_5times" },
360         { "tx_collide_6times" },
361         { "tx_collide_7times" },
362         { "tx_collide_8times" },
363         { "tx_collide_9times" },
364         { "tx_collide_10times" },
365         { "tx_collide_11times" },
366         { "tx_collide_12times" },
367         { "tx_collide_13times" },
368         { "tx_collide_14times" },
369         { "tx_collide_15times" },
370         { "tx_ucast_packets" },
371         { "tx_mcast_packets" },
372         { "tx_bcast_packets" },
373         { "tx_carrier_sense_errors" },
374         { "tx_discards" },
375         { "tx_errors" },
376
377         { "dma_writeq_full" },
378         { "dma_write_prioq_full" },
379         { "rxbds_empty" },
380         { "rx_discards" },
381         { "rx_errors" },
382         { "rx_threshold_hit" },
383
384         { "dma_readq_full" },
385         { "dma_read_prioq_full" },
386         { "tx_comp_queue_full" },
387
388         { "ring_set_send_prod_index" },
389         { "ring_status_update" },
390         { "nic_irqs" },
391         { "nic_avoided_irqs" },
392         { "nic_tx_threshold_hit" },
393
394         { "mbuf_lwm_thresh_hit" },
395 };
396
397 #define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
398
399
/* Self-test names reported through ethtool -t.
 * NOTE(review): the order appears to be significant — it presumably must
 * match the test numbering used by the driver's self-test code (not
 * visible in this chunk); confirm before reordering.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test        (online) " },
	{ "link test         (online) " },
	{ "register test     (offline)" },
	{ "memory test       (offline)" },
	{ "mac loopback test (offline)" },
	{ "phy loopback test (offline)" },
	{ "ext loopback test (offline)" },
	{ "interrupt test    (offline)" },
};
412
413 #define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)
414
415
/* Write @val to the direct-mapped register at offset @off.
 * Posted write: no read-back flush is performed here.
 */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}
420
/* Read the direct-mapped register at offset @off. */
static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}
425
/* Write @val to the APE (management processor) register at offset @off. */
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}
430
/* Read the APE (management processor) register at offset @off. */
static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
435
/* Write a chip register through the PCI config-space indirect access
 * window (TG3PCI_REG_BASE_ADDR selects the register, TG3PCI_REG_DATA
 * carries the value).  indirect_lock serializes the two config cycles
 * so concurrent users cannot interleave address and data writes.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
445
/* Write @val to register @off, then read it back so the posted PCI
 * write is flushed to the device before returning.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
451
/* Read a chip register through the PCI config-space indirect access
 * window.  Counterpart of tg3_write_indirect_reg32(); indirect_lock
 * keeps the address/data config cycles atomic with respect to other
 * indirect accesses.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
463
/* Write a mailbox register when register space must be accessed
 * indirectly.  Two mailboxes (RX return ring 0 consumer index and the
 * standard ring producer index) have dedicated shadow registers in PCI
 * config space and are written there directly; all other mailboxes go
 * through the generic indirect window, where the mailbox block sits at
 * offset 0x5600.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	/* RX return ring consumer index mailbox has a config-space shadow. */
	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	/* Standard RX ring producer index mailbox likewise. */
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
493
/* Read a mailbox register through the indirect window; the mailbox
 * block sits at offset 0x5600 in the indirect register space.
 * indirect_lock keeps the address/data config cycles atomic.
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
505
506 /* usec_wait specifies the wait time in usec when writing to certain registers
507  * where it is unsafe to read back the register without some delay.
508  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
509  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
510  */
/* Flushed register write with an optional settle delay of @usec_wait
 * microseconds (see the block comment above for when a delay is
 * required).  On chips needing the PCIX target or ICH workarounds the
 * configured non-posted write method is used instead of MMIO.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
529
/* Mailbox write with a conditional read-back flush.  The read-back is
 * performed only when neither MBOX_WRITE_REORDER nor ICH_WORKAROUND is
 * set; on those chips the configured write method handles ordering on
 * its own.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
536
/* TX mailbox write.  Chips with the TXD_MBOX_HWBUG erratum need the
 * value written twice; chips that may reorder mailbox writes
 * (MBOX_WRITE_REORDER) need a read-back to flush the posted write.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}
546
/* 5906: mailbox reads go through the GRC mailbox register block. */
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}
551
/* 5906: mailbox writes go through the GRC mailbox register block. */
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
556
557 #define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
558 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
559 #define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
560 #define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
561 #define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)
562
563 #define tw32(reg, val)                  tp->write32(tp, reg, val)
564 #define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
565 #define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
566 #define tr32(reg)                       tp->read32(tp, reg)
567
/* Write @val into NIC on-board SRAM at offset @off through the shared
 * memory window (base-address register + data register).
 * On the 5906, writes to the statistics-block range are silently
 * dropped — presumably that range is not accessible via the window on
 * this chip (the matching check exists in tg3_read_mem()).
 * indirect_lock serializes use of the shared window registers; the
 * SRAM_USE_CONFIG flag selects config-space vs. MMIO window access.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
592
/* Read a word of NIC on-board SRAM at offset @off into *@val through
 * the shared memory window.  On the 5906 the statistics-block range is
 * not readable this way; *@val is set to 0 for such offsets.
 * indirect_lock serializes use of the shared window registers; the
 * SRAM_USE_CONFIG flag selects config-space vs. MMIO window access.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
619
620 static void tg3_ape_lock_init(struct tg3 *tp)
621 {
622         int i;
623         u32 regbase, bit;
624
625         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
626                 regbase = TG3_APE_LOCK_GRANT;
627         else
628                 regbase = TG3_APE_PER_LOCK_GRANT;
629
630         /* Make sure the driver hasn't any stale locks. */
631         for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
632                 switch (i) {
633                 case TG3_APE_LOCK_PHY0:
634                 case TG3_APE_LOCK_PHY1:
635                 case TG3_APE_LOCK_PHY2:
636                 case TG3_APE_LOCK_PHY3:
637                         bit = APE_LOCK_GRANT_DRIVER;
638                         break;
639                 default:
640                         if (!tp->pci_fn)
641                                 bit = APE_LOCK_GRANT_DRIVER;
642                         else
643                                 bit = 1 << tp->pci_fn;
644                 }
645                 tg3_ape_write32(tp, regbase + 4 * i, bit);
646         }
647
648 }
649
/* Acquire an APE hardware lock on behalf of the driver.
 * Returns 0 on success (or when no lock is needed), -EINVAL for an
 * unsupported @locknum, and -EBUSY if the lock was not granted within
 * ~1 ms of polling.
 */
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* The 5761 has no GPIO lock to take. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return 0;
		/* Fall through - GPIO uses the same request bit as GRC/MEM */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		/* PCI function 0 requests with the generic driver bit,
		 * other functions with a per-function bit.
		 */
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	default:
		return -EINVAL;
	}

	/* The 5761 keeps its lock registers at a different base. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
702
/* Release an APE hardware lock previously taken with tg3_ape_lock().
 * Writing our grant bit back to the lock's grant register frees it.
 * Silently ignores unsupported lock numbers.
 */
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* The 5761 has no GPIO lock, so nothing to release. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return;
		/* Fall through - GPIO uses the same grant bit as GRC/MEM */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		/* Must match the bit used in tg3_ape_lock(). */
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	default:
		return;
	}

	/* The 5761 keeps its grant registers at a different base. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
732
/* Post @event to the APE firmware's event-status register and ring the
 * doorbell so the firmware processes it.
 * Best-effort: silently returns if the APE signature or firmware-ready
 * status is absent, if NCSI firmware is in use (it does not support
 * APE events), or if a previously queued event is still pending after
 * ~1 ms of polling.  The MEM lock guards access to the event-status
 * register.
 */
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	/* NCSI does not support APE events */
	if (tg3_flag(tp, APE_HAS_NCSI))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		/* Queue our event only once the previous one is serviced. */
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	/* Ring the doorbell only if our event was actually queued. */
	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
772
/* Inform the APE management firmware of a driver state transition.
 *
 * @kind: RESET_KIND_INIT, RESET_KIND_SHUTDOWN or RESET_KIND_SUSPEND;
 *        any other value is ignored.
 *
 * For INIT, publishes the host segment (signature, length, incremented
 * init count, driver version, behavior flags) before signalling the
 * start event.  For SHUTDOWN, clears the host segment signature and
 * records whether the driver parked the device in WOL or unload state.
 * No-op unless the ENABLE_APE flag is set.
 */
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                /* Publish host segment header and driver identity. */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                /* Bump the init counter so the APE can see restarts. */
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state.  Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                /* Record WOL intent so the APE keeps the link alive. */
                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        case RESET_KIND_SUSPEND:
                event = APE_EVENT_STATUS_STATE_SUSPEND;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}
829
/* Mask all chip interrupts: set the PCI interrupt mask bit in the
 * misc host control register, then write 1 to every vector's
 * interrupt mailbox (covers all possible vectors via irq_max, not
 * just the currently active irq_cnt).
 */
static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
839
/* Re-enable chip interrupts (inverse of tg3_disable_ints()).
 *
 * Clears irq_sync with a write barrier so interrupt handlers observe
 * it before interrupts can fire, unmasks the PCI interrupt, re-arms
 * every active vector's mailbox with its last status tag, and forces
 * an initial interrupt if status work is already pending.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                /* NOTE(review): 1-shot MSI mode apparently needs the
                 * mailbox written twice to re-arm — confirm against
                 * chip documentation.
                 */
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        /* Drop the first two vectors' coalesce bits from the cached value. */
        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
870
871 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
872 {
873         struct tg3 *tp = tnapi->tp;
874         struct tg3_hw_status *sblk = tnapi->hw_status;
875         unsigned int work_exists = 0;
876
877         /* check for phy events */
878         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
879                 if (sblk->status & SD_STATUS_LINK_CHG)
880                         work_exists = 1;
881         }
882         /* check for RX/TX work to do */
883         if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
884             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
885                 work_exists = 1;
886
887         return work_exists;
888 }
889
890 /* tg3_int_reenable
891  *  similar to tg3_enable_ints, but it accurately determines whether there
892  *  is new work pending and can return without flushing the PIO write
893  *  which reenables interrupts
894  */
/* Re-arm this vector's interrupt mailbox with the last processed
 * status tag; mmiowb() orders the MMIO write before any subsequent
 * lock release on other CPUs.
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
910
/* Reprogram TG3PCI_CLOCK_CTRL, transitioning the core clock through
 * the intermediate ALTCLK settings the hardware requires (each write
 * flushed with a 40 us wait) before landing on the final value.
 *
 * No-op on CPMU-equipped chips and the 5780 class, which manage their
 * own clocking.  The computed value is cached in tp->pci_clock_ctrl.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        /* Preserve only the CLKRUN bits and the low 5 clock bits. */
        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                /* Step through 44MHz+ALTCLK, then ALTCLK alone, before
                 * the final value below (required write ordering).
                 */
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
943
944 #define PHY_BUSY_LOOPS  5000
945
/* Read PHY register @reg through the MAC's MI (MDIO) interface.
 *
 * Stores the 16-bit result in *val (zeroed on entry so a failed read
 * yields 0).  Returns 0 on success or -EBUSY if the MI interface does
 * not go idle within PHY_BUSY_LOOPS polls.  MI auto-polling, if
 * enabled, is paused for the duration and restored before returning.
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        /* Build the MI command frame: PHY address, register, read opcode. */
        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        /* Re-read after a short settle so the data bits
                         * are stable (NOTE(review): presumably a hardware
                         * quirk — confirm against chip errata).
                         */
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        /* Restore MI auto-polling if we paused it above. */
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
994
/* Write @val to PHY register @reg through the MAC's MI (MDIO)
 * interface.
 *
 * Returns 0 on success or -EBUSY if the MI interface does not go idle
 * within PHY_BUSY_LOOPS polls.  Writes to MII_CTRL1000 and
 * MII_TG3_AUX_CTRL are silently skipped (success) on FET-style PHYs.
 * MI auto-polling, if enabled, is paused for the duration and
 * restored before returning.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        /* Build the MI command frame: address, register, data, write op. */
        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        /* Restore MI auto-polling if we paused it above. */
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
1043
1044 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1045 {
1046         int err;
1047
1048         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1049         if (err)
1050                 goto done;
1051
1052         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1053         if (err)
1054                 goto done;
1055
1056         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1057                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1058         if (err)
1059                 goto done;
1060
1061         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1062
1063 done:
1064         return err;
1065 }
1066
1067 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1068 {
1069         int err;
1070
1071         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1072         if (err)
1073                 goto done;
1074
1075         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1076         if (err)
1077                 goto done;
1078
1079         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1080                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1081         if (err)
1082                 goto done;
1083
1084         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1085
1086 done:
1087         return err;
1088 }
1089
1090 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1091 {
1092         int err;
1093
1094         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1095         if (!err)
1096                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1097
1098         return err;
1099 }
1100
1101 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1102 {
1103         int err;
1104
1105         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1106         if (!err)
1107                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1108
1109         return err;
1110 }
1111
1112 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1113 {
1114         int err;
1115
1116         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1117                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1118                            MII_TG3_AUXCTL_SHDWSEL_MISC);
1119         if (!err)
1120                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1121
1122         return err;
1123 }
1124
1125 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1126 {
1127         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1128                 set |= MII_TG3_AUXCTL_MISC_WREN;
1129
1130         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1131 }
1132
/* Enable/disable the PHY's SM DSP via the auxiliary-control shadow
 * register.  Both macros expand to a tg3_phy_auxctl_write() call and
 * evaluate to its int return value.  The stray trailing semicolon in
 * the DISABLE variant has been removed: it double-terminated callers'
 * statements and would break use inside an unbraced if/else or as an
 * expression.
 */
#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)
1141
/* Soft-reset the PHY by setting BMCR_RESET and polling (up to ~50 ms,
 * 5000 x 10 us) until the bit self-clears.
 *
 * Returns 0 on success, -EBUSY on any MI access failure or if the
 * reset bit never clears.
 */
static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        /* Give the PHY a moment to settle post-reset. */
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        /* limit reaches -1 only when the loop ran to exhaustion. */
        if (limit < 0)
                return -EBUSY;

        return 0;
}
1172
/* phylib mii_bus ->read callback: read register @reg from the PHY.
 *
 * @mii_id is ignored — the chip talks to the single PHY at
 * tp->phy_addr.  Returns the register value, or -EIO on MI failure.
 * Serialized against the rest of the driver by tp->lock.
 */
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (tg3_readphy(tp, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}
1187
1188 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1189 {
1190         struct tg3 *tp = bp->priv;
1191         u32 ret = 0;
1192
1193         spin_lock_bh(&tp->lock);
1194
1195         if (tg3_writephy(tp, reg, val))
1196                 ret = -EIO;
1197
1198         spin_unlock_bh(&tp->lock);
1199
1200         return ret;
1201 }
1202
/* phylib mii_bus ->reset callback.  Intentionally a no-op; PHY resets
 * are driven through the driver's own reset paths.
 */
static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}
1207
/* Program the 5785 MAC's PHY-interface registers (MAC_PHYCFG1/2 and
 * the external RGMII mode register) to match the attached phylib PHY.
 * Unrecognized PHY IDs are left untouched.
 */
static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        /* Pick the LED-mode bits appropriate for the PHY flavor. */
        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        /* Non-RGMII attachments need only LED modes and clock timeouts. */
        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        /* RGMII with in-band status enabled: mask/qualify bits apply. */
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        /* PHYCFG1: clock timeouts, RGMII interrupt, optional external
         * in-band RX decode / TX status forwarding.
         */
        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        /* External RGMII mode: mirror the in-band RX/TX selections. */
        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}
1288
/* Start MDIO operation: disable MI auto-polling so explicit MI
 * accesses work, then reapply the 5785 PHY-interface configuration if
 * the mdio bus has already been registered.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}
1299
/* Determine the PHY address and, when phylib is in use, allocate and
 * register the mdio bus, locate the PHY device, and apply per-PHY
 * interface and workaround flags.
 *
 * Returns 0 on success (including the non-phylib / already-inited
 * cases), -ENOMEM on allocation failure, -ENODEV if no usable PHY is
 * found, or the mdiobus_register() error code.
 */
static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

                /* 5717+ parts: PHY address derives from the PCI
                 * function; serdes PHYs sit 7 addresses higher.
                 */
                tp->phy_addr = tp->pci_fn + 1;

                if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        /* Bus id is derived from the PCI bus/devfn to stay unique. */
        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
         * quickest way to bring the device back to an operational state..
         */
        if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
                tg3_bmcr_reset(tp);

        i = mdiobus_register(tp->mdio_bus);
        if (i) {
                dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
                mdiobus_free(tp->mdio_bus);
                return i;
        }

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        if (!phydev || !phydev->drv) {
                dev_warn(&tp->pdev->dev, "No PHY devices\n");
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
                return -ENODEV;
        }

        /* Per-PHY interface mode and workaround/feature flags. */
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM57780:
                phydev->interface = PHY_INTERFACE_MODE_GMII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                break;
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
                                     PHY_BRCM_RX_REFCLK_UNUSED |
                                     PHY_BRCM_DIS_TXCRXC_NOENRGY |
                                     PHY_BRCM_AUTO_PWRDWN_ENABLE;
                if (tg3_flag(tp, RGMII_INBAND_DISABLE))
                        phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
                /* fallthru */
        case PHY_ID_RTL8211C:
                phydev->interface = PHY_INTERFACE_MODE_RGMII;
                break;
        case PHY_ID_RTL8201E:
        case PHY_ID_BCMAC131:
                phydev->interface = PHY_INTERFACE_MODE_MII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                tp->phy_flags |= TG3_PHYFLG_IS_FET;
                break;
        }

        tg3_flag_set(tp, MDIOBUS_INITED);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);

        return 0;
}
1404
/* Tear down the mdio bus registered by tg3_mdio_init(); safe to call
 * when the bus was never initialized.
 */
static void tg3_mdio_fini(struct tg3 *tp)
{
        if (tg3_flag(tp, MDIOBUS_INITED)) {
                tg3_flag_clear(tp, MDIOBUS_INITED);
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
        }
}
1413
/* tp->lock is held.
 * Raise the RX CPU driver-event bit to tell the ASF/management
 * firmware that a new command is waiting in the mailbox, and record
 * the time so tg3_wait_for_event_ack() can bound its wait.
 */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
        u32 val;

        val = tr32(GRC_RX_CPU_EVENT);
        val |= GRC_RX_CPU_DRIVER_EVENT;
        tw32_f(GRC_RX_CPU_EVENT, val);

        tp->last_event_jiffies = jiffies;
}
1425
1426 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1427
/* tp->lock is held.
 * Wait until the firmware clears GRC_RX_CPU_DRIVER_EVENT — i.e. has
 * acknowledged the previous event — or until the remaining portion of
 * the TG3_FW_EVENT_TIMEOUT_USEC budget (measured from the last event)
 * is exhausted.  Returns without waiting if enough wall time has
 * already passed.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
        int i;
        unsigned int delay_cnt;
        long time_remain;

        /* If enough time has passed, no wait is necessary. */
        time_remain = (long)(tp->last_event_jiffies + 1 +
                      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
                      (long)jiffies;
        if (time_remain < 0)
                return;

        /* Check if we can shorten the wait time. */
        delay_cnt = jiffies_to_usecs(time_remain);
        if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
                delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
        /* Poll in ~8 us steps. */
        delay_cnt = (delay_cnt >> 3) + 1;

        for (i = 0; i < delay_cnt; i++) {
                if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
                        break;
                udelay(8);
        }
}
1454
/* tp->lock is held.
 * Snapshot basic MII registers into data[0..3] for a UMP link report:
 * data[0] = BMCR:BMSR, data[1] = ADVERTISE:LPA, data[2] =
 * CTRL1000:STAT1000 (copper PHYs only), data[3] = PHYADDR:0.  Any
 * register that fails to read contributes 0.
 */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
        u32 reg, val;

        val = 0;
        if (!tg3_readphy(tp, MII_BMCR, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_BMSR, &reg))
                val |= (reg & 0xffff);
        *data++ = val;

        val = 0;
        if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_LPA, &reg))
                val |= (reg & 0xffff);
        *data++ = val;

        val = 0;
        /* 1000BASE-T registers are meaningless on serdes links. */
        if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
                if (!tg3_readphy(tp, MII_CTRL1000, &reg))
                        val = reg << 16;
                if (!tg3_readphy(tp, MII_STAT1000, &reg))
                        val |= (reg & 0xffff);
        }
        *data++ = val;

        if (!tg3_readphy(tp, MII_PHYADDR, &reg))
                val = reg << 16;
        else
                val = 0;
        *data++ = val;
}
1489
/* tp->lock is held.
 * Send a link status update to the management firmware.  Applies only
 * to 5780-class chips running ASF; others return immediately.  Gathers
 * the PHY snapshot, waits for the previous firmware event to be acked,
 * fills the command mailbox, then fires the driver event.
 */
static void tg3_ump_link_report(struct tg3 *tp)
{
        u32 data[4];

        if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
                return;

        tg3_phy_gather_ump_data(tp, data);

        tg3_wait_for_event_ack(tp);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
        /* NOTE(review): length 14 while 16 bytes of data follow —
         * presumably the firmware contract; confirm before changing.
         */
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

        tg3_generate_fw_event(tp);
}
1511
/* tp->lock is held.
 * Ask the ASF firmware to pause: wait for the previous event to be
 * acked, issue FWCMD_NICDRV_PAUSE_FW, then wait for that command to
 * be acked too.  Skipped when ASF is off or the APE handles
 * management traffic instead.
 */
static void tg3_stop_fw(struct tg3 *tp)
{
        if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
                /* Wait for RX cpu to ACK the previous event. */
                tg3_wait_for_event_ack(tp);

                tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

                tg3_generate_fw_event(tp);

                /* Wait for RX cpu to ACK this event. */
                tg3_wait_for_event_ack(tp);
        }
}
1527
/* tp->lock is held.
 * Pre-reset firmware handshake: write the firmware mailbox magic,
 * record the driver state for the new-style ASF handshake, and notify
 * the APE for INIT/SUSPEND transitions (SHUTDOWN is reported
 * post-reset instead — see tg3_write_sig_post_reset()).
 */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
        tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
                      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

        if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
                switch (kind) {
                case RESET_KIND_INIT:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_START);
                        break;

                case RESET_KIND_SHUTDOWN:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_UNLOAD);
                        break;

                case RESET_KIND_SUSPEND:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_SUSPEND);
                        break;

                default:
                        break;
                }
        }

        if (kind == RESET_KIND_INIT ||
            kind == RESET_KIND_SUSPEND)
                tg3_ape_driver_state_change(tp, kind);
}
1560
/* tp->lock is held.
 * Post-reset firmware handshake: record the *_DONE driver state for
 * the new-style ASF handshake, and notify the APE on SHUTDOWN (the
 * INIT/SUSPEND notifications happen pre-reset).
 */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
        if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
                switch (kind) {
                case RESET_KIND_INIT:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_START_DONE);
                        break;

                case RESET_KIND_SHUTDOWN:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_UNLOAD_DONE);
                        break;

                default:
                        break;
                }
        }

        if (kind == RESET_KIND_SHUTDOWN)
                tg3_ape_driver_state_change(tp, kind);
}
1584
/* tp->lock is held.
 * Legacy ASF handshake: record the driver state in the firmware
 * mailbox without the *_DONE / APE notifications of the new-style
 * handshake.  No-op unless ASF is enabled.
 */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
        if (tg3_flag(tp, ENABLE_ASF)) {
                switch (kind) {
                case RESET_KIND_INIT:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_START);
                        break;

                case RESET_KIND_SHUTDOWN:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_UNLOAD);
                        break;

                case RESET_KIND_SUSPEND:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_SUSPEND);
                        break;

                default:
                        break;
                }
        }
}
1610
/* Wait for on-chip firmware to finish initializing after a reset.
 *
 * 5906 parts expose an explicit VCPU init-done bit (polled up to
 * 20 ms; -ENODEV on timeout).  All other parts are polled for the
 * firmware mailbox handshake value (~1 s); a timeout there is NOT an
 * error — some boards legitimately ship without firmware — but is
 * logged once.  Returns 0 in all non-5906 cases.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
        int i;
        u32 val;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                /* Wait up to 20ms for init done. */
                for (i = 0; i < 200; i++) {
                        if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
                                return 0;
                        udelay(100);
                }
                return -ENODEV;
        }

        /* Wait for firmware initialization to complete. */
        for (i = 0; i < 100000; i++) {
                tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
                if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
                        break;
                udelay(10);
        }

        /* Chip might not be fitted with firmware.  Some Sun onboard
         * parts are configured like that.  So don't signal the timeout
         * of the above loop as an error, but do report the lack of
         * running firmware once.
         */
        if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
                tg3_flag_set(tp, NO_FWARE_REPORTED);

                netdev_info(tp->dev, "No firmware running\n");
        }

        if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
                /* The 57765 A0 needs a little more
                 * time to do some important work.
                 */
                mdelay(10);
        }

        return 0;
}
1654
/* Log the current link state (carrier, speed, duplex, flow control,
 * EEE) through netif/netdev messaging and forward the link change to
 * management firmware via tg3_ump_link_report().
 */
static void tg3_link_report(struct tg3 *tp)
{
        if (!netif_carrier_ok(tp->dev)) {
                netif_info(tp, link, tp->dev, "Link is down\n");
                tg3_ump_link_report(tp);
        } else if (netif_msg_link(tp)) {
                netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
                            (tp->link_config.active_speed == SPEED_1000 ?
                             1000 :
                             (tp->link_config.active_speed == SPEED_100 ?
                              100 : 10)),
                            (tp->link_config.active_duplex == DUPLEX_FULL ?
                             "full" : "half"));

                netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
                            (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
                            "on" : "off",
                            (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
                            "on" : "off");

                if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
                        netdev_info(tp->dev, "EEE is %s\n",
                                    tp->setlpicnt ? "enabled" : "disabled");

                tg3_ump_link_report(tp);
        }
}
1682
1683 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1684 {
1685         u16 miireg;
1686
1687         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1688                 miireg = ADVERTISE_1000XPAUSE;
1689         else if (flow_ctrl & FLOW_CTRL_TX)
1690                 miireg = ADVERTISE_1000XPSE_ASYM;
1691         else if (flow_ctrl & FLOW_CTRL_RX)
1692                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1693         else
1694                 miireg = 0;
1695
1696         return miireg;
1697 }
1698
1699 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1700 {
1701         u8 cap = 0;
1702
1703         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1704                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1705         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1706                 if (lcladv & ADVERTISE_1000XPAUSE)
1707                         cap = FLOW_CTRL_RX;
1708                 if (rmtadv & ADVERTISE_1000XPAUSE)
1709                         cap = FLOW_CTRL_TX;
1710         }
1711
1712         return cap;
1713 }
1714
1715 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1716 {
1717         u8 autoneg;
1718         u8 flowctrl = 0;
1719         u32 old_rx_mode = tp->rx_mode;
1720         u32 old_tx_mode = tp->tx_mode;
1721
1722         if (tg3_flag(tp, USE_PHYLIB))
1723                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1724         else
1725                 autoneg = tp->link_config.autoneg;
1726
1727         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1728                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1729                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1730                 else
1731                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1732         } else
1733                 flowctrl = tp->link_config.flowctrl;
1734
1735         tp->link_config.active_flowctrl = flowctrl;
1736
1737         if (flowctrl & FLOW_CTRL_RX)
1738                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1739         else
1740                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1741
1742         if (old_rx_mode != tp->rx_mode)
1743                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1744
1745         if (flowctrl & FLOW_CTRL_TX)
1746                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1747         else
1748                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1749
1750         if (old_tx_mode != tp->tx_mode)
1751                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1752 }
1753
/* phylib link-change callback.  Mirrors the PHY's negotiated state
 * (port mode, duplex, flow control) into the MAC registers and emits
 * a link report when anything user-visible changed.  Register updates
 * happen under tp->lock; the report is printed after unlocking.
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	/* Recompute the port-mode and duplex bits from scratch. */
	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Full duplex: derive the pause configuration
			 * from our configured flow control and the
			 * partner's advertised pause capabilities.
			 */
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		/* 5785 needs the MI status register retuned for 10Mbps. */
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	/* 1000Mb half duplex gets a longer slot time (0xff vs 32). */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Report only when link presence, speed, duplex or flow
	 * control actually changed.
	 */
	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
1837
/* Connect the MAC to its PHY through phylib: reset the PHY, attach
 * tg3_adjust_link() as the link-change handler and trim the PHY's
 * advertised feature set down to what the MAC supports.  Idempotent;
 * returns 0 on success or a negative errno.
 */
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* 10/100-only devices fall back to the MII feature set. */
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		/* Unsupported interface type: undo the connect. */
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
1885
1886 static void tg3_phy_start(struct tg3 *tp)
1887 {
1888         struct phy_device *phydev;
1889
1890         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1891                 return;
1892
1893         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1894
1895         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1896                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1897                 phydev->speed = tp->link_config.orig_speed;
1898                 phydev->duplex = tp->link_config.orig_duplex;
1899                 phydev->autoneg = tp->link_config.orig_autoneg;
1900                 phydev->advertising = tp->link_config.orig_advertising;
1901         }
1902
1903         phy_start(phydev);
1904
1905         phy_start_aneg(phydev);
1906 }
1907
1908 static void tg3_phy_stop(struct tg3 *tp)
1909 {
1910         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1911                 return;
1912
1913         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1914 }
1915
1916 static void tg3_phy_fini(struct tg3 *tp)
1917 {
1918         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1919                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1920                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1921         }
1922 }
1923
1924 static int tg3_phy_set_extloopbk(struct tg3 *tp)
1925 {
1926         int err;
1927         u32 val;
1928
1929         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
1930                 return 0;
1931
1932         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1933                 /* Cannot do read-modify-write on 5401 */
1934                 err = tg3_phy_auxctl_write(tp,
1935                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1936                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
1937                                            0x4c20);
1938                 goto done;
1939         }
1940
1941         err = tg3_phy_auxctl_read(tp,
1942                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1943         if (err)
1944                 return err;
1945
1946         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
1947         err = tg3_phy_auxctl_write(tp,
1948                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
1949
1950 done:
1951         return err;
1952 }
1953
1954 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1955 {
1956         u32 phytest;
1957
1958         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1959                 u32 phy;
1960
1961                 tg3_writephy(tp, MII_TG3_FET_TEST,
1962                              phytest | MII_TG3_FET_SHADOW_EN);
1963                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1964                         if (enable)
1965                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1966                         else
1967                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1968                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1969                 }
1970                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1971         }
1972 }
1973
/* Enable or disable the PHY's Auto Power-Down feature through the
 * MISC shadow registers.  No-op on chips without APD support.
 */
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		/* FET PHYs use a different shadow-register scheme. */
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	/* First program the SCR5 power-saving controls ... */
	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	/* 5784 with APD enabled leaves DLL APD off here. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	/* ... then set the APD wake timer and the enable bit. */
	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
2008
/* Enable or disable automatic MDI/MDI-X crossover on the copper PHY.
 * No-op on pre-5705 chips and on any SerDes interface.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		/* FET PHYs: flip the MDIX bit in the shadowed
		 * misc-control register, restoring FET_TEST afterwards
		 * to close the shadow window.
		 */
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		/* Other PHYs: read-modify-write the AUXCTL misc block. */
		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
2049
2050 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2051 {
2052         int ret;
2053         u32 val;
2054
2055         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2056                 return;
2057
2058         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2059         if (!ret)
2060                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2061                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2062 }
2063
/* Load factory calibration values from the chip's OTP word into the
 * PHY DSP coefficient registers.  No-op when no OTP value was stored
 * at probe time or when the SMDSP cannot be opened.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* A nonzero return means the SMDSP could not be enabled. */
	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
		return;

	/* AGC target */
	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	/* High-pass filter settings */
	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	/* Low-pass filter disable plus ADC clock adjust */
	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	/* VDAC trim */
	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	/* 10BASE-T amplitude */
	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	/* Resistor offsets */
	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
}
2100
/* Update Energy Efficient Ethernet state after a link change.  Sets
 * tp->setlpicnt to 2 when the link partner resolved EEE at 100/1000
 * full duplex (apparently a countdown consumed elsewhere — confirm
 * against the timer code); otherwise disables LPI entry.
 */
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		/* LPI exit time depends on link speed. */
		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		/* Read the clause-45 EEE resolution status to see what
		 * the link partner agreed to.
		 */
		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		/* EEE not in use: clear the DSP tap (when the link is
		 * up and the SMDSP opens) and drop the LPI enable bit.
		 */
		if (current_link_up == 1 &&
		   !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
2143
2144 static void tg3_phy_eee_enable(struct tg3 *tp)
2145 {
2146         u32 val;
2147
2148         if (tp->link_config.active_speed == SPEED_1000 &&
2149             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2150              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2151              tg3_flag(tp, 57765_CLASS)) &&
2152             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2153                 val = MII_TG3_DSP_TAP26_ALNOKO |
2154                       MII_TG3_DSP_TAP26_RMRXSTO;
2155                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2156                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2157         }
2158
2159         val = tr32(TG3_CPMU_EEE_MODE);
2160         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2161 }
2162
2163 static int tg3_wait_macro_done(struct tg3 *tp)
2164 {
2165         int limit = 100;
2166
2167         while (limit--) {
2168                 u32 tmp32;
2169
2170                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2171                         if ((tmp32 & 0x1000) == 0)
2172                                 break;
2173                 }
2174         }
2175         if (limit < 0)
2176                 return -EBUSY;
2177
2178         return 0;
2179 }
2180
/* Write a known test pattern into each of the four PHY DSP channels
 * and read it back to verify the DSP memory.  On a macro timeout the
 * caller is asked (via *resetp) to reset the PHY and retry; on a data
 * mismatch the DSP is nudged back to a sane state and -EBUSY is
 * returned without requesting a reset.  Returns 0 when all four
 * channels verify clean.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Address the channel's block and open it for writing. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		/* Latch the writes and wait for the macro to finish. */
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-address the block and switch to read-back mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Words come back as low/high pairs; only the low 15
		 * bits and high 4 bits are significant.
		 */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: restore the DSP before
				 * bailing out (no reset requested).
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
2246
2247 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2248 {
2249         int chan;
2250
2251         for (chan = 0; chan < 4; chan++) {
2252                 int i;
2253
2254                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2255                              (chan * 0x2000) | 0x0200);
2256                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2257                 for (i = 0; i < 6; i++)
2258                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2259                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2260                 if (tg3_wait_macro_done(tp))
2261                         return -EBUSY;
2262         }
2263
2264         return 0;
2265 }
2266
/* PHY reset workaround for 5703/5704/5705 chips: reset the PHY, force
 * 1000Mb full-duplex master mode and verify the DSP channel memory
 * with a test pattern, retrying (up to 10 times) with a fresh PHY
 * reset whenever verification asks for one.  Returns 0 on success or
 * a negative errno.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;	/* PHY not responding; retry */

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_CTRL1000,
			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);

		err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
		if (err)
			return err;

		/* Block the PHY control access.  */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	/* Clear the channel pattern memory regardless of the outcome
	 * above, then unblock PHY control access.
	 */
	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);

	/* Restore the original master/slave advertisement. */
	tg3_writephy(tp, MII_CTRL1000, phy9_orig);

	/* Re-enable transmitter and interrupt; flag -EBUSY if even
	 * that read fails and no earlier error was recorded.
	 */
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
2333
/* Reset the tigon3 PHY and reapply all chip-specific workarounds.
 * Reports a link-down transition if the carrier was up.  Returns 0 on
 * success or a negative errno.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Take the embedded PHY out of IDDQ (power-down) first. */
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* Read BMSR twice: the first read returns the latched status,
	 * the second the current one; failure of either means the PHY
	 * is not responding.
	 */
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	/* 5703/4/5 use the dedicated reset-and-verify sequence. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		/* Temporarily lift the GPHY 10Mb RX-only restriction
		 * around the reset; restored below.
		 */
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		/* Take the MAC clock off the 12.5MHz low-power setting
		 * if it was left there.
		 */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	/* PHY-erratum workarounds keyed off the phy_flags bits. */
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
2474
/* Per-function GPIO handshake messages.  Each of the four PCI functions
 * owns a 4-bit nibble in a shared status word (see
 * tg3_set_function_status()) carrying a "driver present" bit and a
 * "needs Vaux power" bit.
 */
#define TG3_GPIO_MSG_DRVR_PRES           0x00000001
#define TG3_GPIO_MSG_NEED_VAUX           0x00000002
#define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
                                          TG3_GPIO_MSG_NEED_VAUX)
/* Masks covering the corresponding bit of all four function nibbles. */
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
        ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
         (TG3_GPIO_MSG_DRVR_PRES << 4) | \
         (TG3_GPIO_MSG_DRVR_PRES << 8) | \
         (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
        ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
         (TG3_GPIO_MSG_NEED_VAUX << 4) | \
         (TG3_GPIO_MSG_NEED_VAUX << 8) | \
         (TG3_GPIO_MSG_NEED_VAUX << 12))
2490
2491 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2492 {
2493         u32 status, shift;
2494
2495         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2496             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2497                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2498         else
2499                 status = tr32(TG3_CPMU_DRV_STATUS);
2500
2501         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2502         status &= ~(TG3_GPIO_MSG_MASK << shift);
2503         status |= (newstat << shift);
2504
2505         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2506             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2507                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2508         else
2509                 tw32(TG3_CPMU_DRV_STATUS, status);
2510
2511         return status >> TG3_APE_GPIO_MSG_SHIFT;
2512 }
2513
2514 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2515 {
2516         if (!tg3_flag(tp, IS_NIC))
2517                 return 0;
2518
2519         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2520             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2521             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2522                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2523                         return -EIO;
2524
2525                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2526
2527                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2528                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2529
2530                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2531         } else {
2532                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2533                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2534         }
2535
2536         return 0;
2537 }
2538
2539 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2540 {
2541         u32 grc_local_ctrl;
2542
2543         if (!tg3_flag(tp, IS_NIC) ||
2544             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2545             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2546                 return;
2547
2548         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2549
2550         tw32_wait_f(GRC_LOCAL_CTRL,
2551                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2552                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2553
2554         tw32_wait_f(GRC_LOCAL_CTRL,
2555                     grc_local_ctrl,
2556                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2557
2558         tw32_wait_f(GRC_LOCAL_CTRL,
2559                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2560                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2561 }
2562
/* Route the power source to the auxiliary (Vaux) supply.  The GPIO
 * sequence is chip-family specific and order-sensitive; every
 * tw32_wait_f() settles for TG3_GRC_LCLCTL_PWRSW_DELAY between steps.
 */
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* 5700/5701: single write enables GPIO0-2 and drives
		 * GPIO0/GPIO1 high.
		 */
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		/* Raise GPIO0 only after the other outputs are set. */
		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
2639
2640 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2641 {
2642         u32 msg = 0;
2643
2644         /* Serialize power state transitions */
2645         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2646                 return;
2647
2648         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2649                 msg = TG3_GPIO_MSG_NEED_VAUX;
2650
2651         msg = tg3_set_function_status(tp, msg);
2652
2653         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2654                 goto done;
2655
2656         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2657                 tg3_pwrsrc_switch_to_vaux(tp);
2658         else
2659                 tg3_pwrsrc_die_with_vmain(tp);
2660
2661 done:
2662         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2663 }
2664
/* Decide whether this NIC (and, on dual-port boards, its peer) needs
 * auxiliary power and switch the power source accordingly.
 * @include_wol: when true, an enabled Wake-on-LAN setting also counts
 * as a reason to stay on Vaux.
 */
static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		/* These chips coordinate via the APE GPIO handshake. */
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		return;
	}

	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			/* Peer is fully initialized: bail out without
			 * touching the power source.
			 */
			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
	    tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);
}
2708
2709 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2710 {
2711         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2712                 return 1;
2713         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2714                 if (speed != SPEED_10)
2715                         return 1;
2716         } else if (speed == SPEED_10)
2717                 return 1;
2718
2719         return 0;
2720 }
2721
/* Power down the PHY in preparation for a low-power state.
 * @do_low_power: when true, also program the aux-control power bits
 * for copper PHYs.  Serdes, 5906 and FET-style PHYs take chip-specific
 * paths and return early.
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			/* NOTE(review): bit 15 of MAC_SERDES_CFG is set
			 * with a magic constant; meaning not visible
			 * here — confirm against chip documentation.
			 */
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906: reset the PHY and put the EPHY in IDDQ mode. */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			/* Expose the FET shadow registers, set the
			 * standby power-down bit, then hide them again.
			 */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		/* Slow the 1000MB MAC clock to 12.5MHz before
		 * powering the PHY down.
		 */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
2794
2795 /* tp->lock is held. */
2796 static int tg3_nvram_lock(struct tg3 *tp)
2797 {
2798         if (tg3_flag(tp, NVRAM)) {
2799                 int i;
2800
2801                 if (tp->nvram_lock_cnt == 0) {
2802                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2803                         for (i = 0; i < 8000; i++) {
2804                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2805                                         break;
2806                                 udelay(20);
2807                         }
2808                         if (i == 8000) {
2809                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2810                                 return -ENODEV;
2811                         }
2812                 }
2813                 tp->nvram_lock_cnt++;
2814         }
2815         return 0;
2816 }
2817
2818 /* tp->lock is held. */
2819 static void tg3_nvram_unlock(struct tg3 *tp)
2820 {
2821         if (tg3_flag(tp, NVRAM)) {
2822                 if (tp->nvram_lock_cnt > 0)
2823                         tp->nvram_lock_cnt--;
2824                 if (tp->nvram_lock_cnt == 0)
2825                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2826         }
2827 }
2828
2829 /* tp->lock is held. */
2830 static void tg3_enable_nvram_access(struct tg3 *tp)
2831 {
2832         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2833                 u32 nvaccess = tr32(NVRAM_ACCESS);
2834
2835                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2836         }
2837 }
2838
2839 /* tp->lock is held. */
2840 static void tg3_disable_nvram_access(struct tg3 *tp)
2841 {
2842         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2843                 u32 nvaccess = tr32(NVRAM_ACCESS);
2844
2845                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2846         }
2847 }
2848
/* Read one 32-bit word at @offset via the legacy serial EEPROM
 * interface (used when the NVRAM interface is absent).  @offset must
 * be dword aligned and within EEPROM_ADDR_ADDR_MASK.  Returns 0 and
 * stores the word in *@val, or a negative errno.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	/* Preserve unrelated bits of the address register. */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll up to ~1s for the read to complete. */
	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
2888
#define NVRAM_CMD_TIMEOUT 10000 /* iterations of 10us polls (~100ms) */
2890
2891 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2892 {
2893         int i;
2894
2895         tw32(NVRAM_CMD, nvram_cmd);
2896         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2897                 udelay(10);
2898                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2899                         udelay(10);
2900                         break;
2901                 }
2902         }
2903
2904         if (i == NVRAM_CMD_TIMEOUT)
2905                 return -EBUSY;
2906
2907         return 0;
2908 }
2909
2910 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2911 {
2912         if (tg3_flag(tp, NVRAM) &&
2913             tg3_flag(tp, NVRAM_BUFFERED) &&
2914             tg3_flag(tp, FLASH) &&
2915             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2916             (tp->nvram_jedecnum == JEDEC_ATMEL))
2917
2918                 addr = ((addr / tp->nvram_pagesize) <<
2919                         ATMEL_AT45DB0X1B_PAGE_POS) +
2920                        (addr % tp->nvram_pagesize);
2921
2922         return addr;
2923 }
2924
2925 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2926 {
2927         if (tg3_flag(tp, NVRAM) &&
2928             tg3_flag(tp, NVRAM_BUFFERED) &&
2929             tg3_flag(tp, FLASH) &&
2930             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2931             (tp->nvram_jedecnum == JEDEC_ATMEL))
2932
2933                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2934                         tp->nvram_pagesize) +
2935                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2936
2937         return addr;
2938 }
2939
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
/* Read one 32-bit word of NVRAM at @offset into *@val.  Falls back to
 * the legacy EEPROM interface when the NVRAM interface is absent.
 * Takes and releases the NVRAM arbitration lock around the access.
 * Returns 0 or a negative errno.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	/* Translate to the device's physical flash addressing. */
	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
2977
2978 /* Ensures NVRAM data is in bytestream format. */
2979 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2980 {
2981         u32 v;
2982         int res = tg3_nvram_read(tp, offset, &v);
2983         if (!res)
2984                 *val = cpu_to_be32(v);
2985         return res;
2986 }
2987
/* Write @len bytes from @buf to the serial EEPROM starting at @offset,
 * one 32-bit word at a time.  @offset and @len are dword aligned.
 * Returns 0 on success or -EBUSY if a word fails to complete.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		/* Clear any stale COMPLETE status before starting. */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll up to ~1s for this word to finish. */
		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
3036
/* offset and length are dword aligned */
/* Write to unbuffered (page-erase) flash: each affected page is read
 * into a temporary buffer, patched with the caller's data, erased,
 * and rewritten in full.  Returns 0 on success or a negative errno.
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		phy_addr = offset & ~pagemask;

		/* Read the whole page containing @offset. */
		for (j = 0; j < pagesize; j += 4) {
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
						  (__be32 *) (tmp + j));
			if (ret)
				break;
		}
		if (ret)
			break;

		/* Patch only the portion being written. */
		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		/* NOTE(review): WREN/erase failures below break out of
		 * the loop without updating ret, so the caller sees
		 * success — confirm this is intended.
		 */
		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Write the patched page back one word at a time. */
		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));

			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
			if (ret)
				break;
		}
		if (ret)
			break;
	}

	/* Always re-issue write-disable before returning. */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
3135
/* offset and length are dword aligned */
/* Write to buffered flash or EEPROM-style NVRAM one dword at a time.
 * Returns 0 on success or a negative errno from the command engine.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* FIRST marks a page boundary or the start of the
		 * transfer; LAST marks a page end or the transfer end.
		 */
		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
		    !tg3_flag(tp, FLASH) ||
		    !tg3_flag(tp, 57765_PLUS))
			tw32(NVRAM_ADDR, phy_addr);

		/* Pre-5755 ST parts need an explicit write enable at
		 * the start of each page.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			u32 cmd;

			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
			ret = tg3_nvram_exec_cmd(tp, cmd);
			if (ret)
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
	}
	return ret;
}
3190
/* offset and length are dword aligned */
/* Top-level NVRAM write entry point: lifts the GPIO-based write
 * protect (when present), routes to the eeprom/buffered/unbuffered
 * writer, then restores protection.  Returns 0 or a negative errno.
 */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		/* Drop GPIO1 — presumably releases the external write
		 * protect line; restored below.
		 */
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!tg3_flag(tp, NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	} else {
		u32 grc_mode;

		/* NOTE(review): this early return leaves the GPIO
		 * write protect disabled — confirm intended.
		 */
		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
			ret = tg3_nvram_write_block_buffered(tp, offset, len,
				buf);
		} else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
				buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}
3240
/* On-chip scratch memory regions used to hold RX/TX CPU firmware. */
#define RX_CPU_SCRATCH_BASE     0x30000
#define RX_CPU_SCRATCH_SIZE     0x04000
#define TX_CPU_SCRATCH_BASE     0x34000
#define TX_CPU_SCRATCH_SIZE     0x04000
3245
/* tp->lock is held. */
/* Halt the embedded RX or TX CPU selected by @offset (RX_CPU_BASE or
 * TX_CPU_BASE).  5705+ chips have no TX CPU (BUG_ON); the 5906 uses a
 * dedicated VCPU halt bit instead of CPU_MODE.  Returns 0 on success
 * or -ENODEV if the CPU never reports halted.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
	int i;

	BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (offset == RX_CPU_BASE) {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}

		/* NOTE(review): the RX path issues one extra flushed
		 * halt request plus a delay; presumably required by
		 * the RX CPU — confirm against chip documentation.
		 */
		tw32(offset + CPU_STATE, 0xffffffff);
		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
		udelay(10);
	} else {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
	}

	if (i >= 10000) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, offset == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
3290
/* Describes a firmware image to be loaded into an on-chip CPU. */
struct fw_info {
	unsigned int fw_base;	/* image load address; low 16 bits are the
				 * offset into the CPU scratch region */
	unsigned int fw_len;	/* image length in bytes */
	const __be32 *fw_data;	/* big-endian image words */
};
3296
/* tp->lock is held. */
/* Halt the CPU at @cpu_base and copy the firmware image described by
 * @info into its scratch memory at @cpu_scratch_base, leaving the CPU
 * halted.  Returns 0 on success or a negative errno.
 */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 struct fw_info *info)
{
	int err, lock_err, i;
	void (*write_op)(struct tg3 *, u32, u32);

	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	/* Pick the write routine appropriate for the chip generation. */
	if (tg3_flag(tp, 5705_PLUS))
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	/* It is possible that bootcode is still loading at this point.
	 * Get the nvram lock first before halting the cpu.
	 */
	lock_err = tg3_nvram_lock(tp);
	err = tg3_halt_cpu(tp, cpu_base);
	if (!lock_err)
		tg3_nvram_unlock(tp);
	if (err)
		goto out;

	/* Zero the scratch area, keep the CPU halted, then copy the
	 * image one word at a time.
	 */
	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
		write_op(tp, cpu_scratch_base + i, 0);
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->fw_base & 0xffff) +
			      (i * sizeof(u32))),
			      be32_to_cpu(info->fw_data[i]));

	err = 0;

out:
	return err;
}
3342
/* tp->lock is held.
 *
 * Load the 5701 A0 workaround firmware into both internal CPUs and
 * start only the RX CPU.  Returns 0 or a negative errno.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
        struct fw_info info;
        const __be32 *fw_data;
        int err, i;

        fw_data = (void *)tp->fw->data;

        /* Firmware blob starts with version numbers, followed by
           start address and length. We are setting complete length.
           length = end_address_of_bss - start_address_of_text.
           Remainder is the blob to be loaded contiguously
           from start address. */

        info.fw_base = be32_to_cpu(fw_data[1]);
        info.fw_len = tp->fw->size - 12;        /* skip 3-word header */
        info.fw_data = &fw_data[3];

        /* Load the same image into both the RX and TX CPUs. */
        err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
                                    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
                                    &info);
        if (err)
                return err;

        err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
                                    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
                                    &info);
        if (err)
                return err;

        /* Now startup only the RX cpu. */
        tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
        tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);

        /* Verify the PC took; retry the halt + set-PC sequence a few
         * times before giving up.
         */
        for (i = 0; i < 5; i++) {
                if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
                        break;
                tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
                tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
                tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
                udelay(1000);
        }
        if (i >= 5) {
                netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
                           "should be %08x\n", __func__,
                           tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
                return -ENODEV;
        }
        /* Clear the halt bit to let the RX CPU run. */
        tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
        tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);

        return 0;
}
3397
/* tp->lock is held.
 *
 * Load and start the TSO assist firmware on chips without hardware TSO.
 * Returns 0 (including the no-op hardware-TSO case) or a negative errno.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
        struct fw_info info;
        const __be32 *fw_data;
        unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
        int err, i;

        /* Chips with any hardware TSO variant need no firmware assist. */
        if (tg3_flag(tp, HW_TSO_1) ||
            tg3_flag(tp, HW_TSO_2) ||
            tg3_flag(tp, HW_TSO_3))
                return 0;

        fw_data = (void *)tp->fw->data;

        /* Firmware blob starts with version numbers, followed by
           start address and length. We are setting complete length.
           length = end_address_of_bss - start_address_of_text.
           Remainder is the blob to be loaded contiguously
           from start address. */

        info.fw_base = be32_to_cpu(fw_data[1]);
        cpu_scratch_size = tp->fw_len;
        info.fw_len = tp->fw->size - 12;        /* skip 3-word header */
        info.fw_data = &fw_data[3];

        /* 5705 runs the TSO firmware on the RX CPU out of the mbuf pool
         * area; other chips use the TX CPU and its scratch memory.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                cpu_base = RX_CPU_BASE;
                cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
        } else {
                cpu_base = TX_CPU_BASE;
                cpu_scratch_base = TX_CPU_SCRATCH_BASE;
                cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
        }

        err = tg3_load_firmware_cpu(tp, cpu_base,
                                    cpu_scratch_base, cpu_scratch_size,
                                    &info);
        if (err)
                return err;

        /* Now startup the cpu. */
        tw32(cpu_base + CPU_STATE, 0xffffffff);
        tw32_f(cpu_base + CPU_PC, info.fw_base);

        /* Verify the PC took; retry the halt + set-PC sequence a few
         * times before giving up.
         */
        for (i = 0; i < 5; i++) {
                if (tr32(cpu_base + CPU_PC) == info.fw_base)
                        break;
                tw32(cpu_base + CPU_STATE, 0xffffffff);
                tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
                tw32_f(cpu_base + CPU_PC, info.fw_base);
                udelay(1000);
        }
        if (i >= 5) {
                netdev_err(tp->dev,
                           "%s fails to set CPU PC, is %08x should be %08x\n",
                           __func__, tr32(cpu_base + CPU_PC), info.fw_base);
                return -ENODEV;
        }
        /* Clear the halt bit to let the CPU run. */
        tw32(cpu_base + CPU_STATE, 0xffffffff);
        tw32_f(cpu_base + CPU_MODE,  0x00000000);
        return 0;
}
3461
3462
3463 /* tp->lock is held. */
3464 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3465 {
3466         u32 addr_high, addr_low;
3467         int i;
3468
3469         addr_high = ((tp->dev->dev_addr[0] << 8) |
3470                      tp->dev->dev_addr[1]);
3471         addr_low = ((tp->dev->dev_addr[2] << 24) |
3472                     (tp->dev->dev_addr[3] << 16) |
3473                     (tp->dev->dev_addr[4] <<  8) |
3474                     (tp->dev->dev_addr[5] <<  0));
3475         for (i = 0; i < 4; i++) {
3476                 if (i == 1 && skip_mac_1)
3477                         continue;
3478                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3479                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3480         }
3481
3482         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3483             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3484                 for (i = 0; i < 12; i++) {
3485                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3486                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3487                 }
3488         }
3489
3490         addr_high = (tp->dev->dev_addr[0] +
3491                      tp->dev->dev_addr[1] +
3492                      tp->dev->dev_addr[2] +
3493                      tp->dev->dev_addr[3] +
3494                      tp->dev->dev_addr[4] +
3495                      tp->dev->dev_addr[5]) &
3496                 TX_BACKOFF_SEED_MASK;
3497         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3498 }
3499
/* Restore the saved MISC_HOST_CTRL config-space word; called on the
 * power-up/power-down paths before touching device registers.
 */
static void tg3_enable_register_access(struct tg3 *tp)
{
        /*
         * Make sure register accesses (indirect or otherwise) will function
         * correctly.
         */
        pci_write_config_dword(tp->pdev,
                               TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
3509
3510 static int tg3_power_up(struct tg3 *tp)
3511 {
3512         int err;
3513
3514         tg3_enable_register_access(tp);
3515
3516         err = pci_set_power_state(tp->pdev, PCI_D0);
3517         if (!err) {
3518                 /* Switch out of Vaux if it is a NIC */
3519                 tg3_pwrsrc_switch_to_vmain(tp);
3520         } else {
3521                 netdev_err(tp->dev, "Transition to D0 failed\n");
3522         }
3523
3524         return err;
3525 }
3526
3527 static int tg3_setup_phy(struct tg3 *, int);
3528
/* Prepare the chip for a low-power state: mask interrupts, put the PHY
 * into a WOL-compatible link mode, arm the WOL mailbox/MAC, and gate
 * clocks.  Always returns 0.
 */
static int tg3_power_down_prepare(struct tg3 *tp)
{
        u32 misc_host_ctrl;
        bool device_should_wake, do_low_power;

        tg3_enable_register_access(tp);

        /* Restore the CLKREQ setting. */
        if (tg3_flag(tp, CLKREQ_BUG)) {
                u16 lnkctl;

                pci_read_config_word(tp->pdev,
                                     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
                                     &lnkctl);
                lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
                pci_write_config_word(tp->pdev,
                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
                                      lnkctl);
        }

        /* Mask PCI interrupts while powering down. */
        misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
        tw32(TG3PCI_MISC_HOST_CTRL,
             misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

        device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
                             tg3_flag(tp, WOL_ENABLE);

        if (tg3_flag(tp, USE_PHYLIB)) {
                do_low_power = false;
                /* Save the current link settings once, then restrict the
                 * advertisement for low-power operation via phylib.
                 */
                if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
                    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
                        struct phy_device *phydev;
                        u32 phyid, advertising;

                        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

                        tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

                        /* Remember settings to restore on power-up. */
                        tp->link_config.orig_speed = phydev->speed;
                        tp->link_config.orig_duplex = phydev->duplex;
                        tp->link_config.orig_autoneg = phydev->autoneg;
                        tp->link_config.orig_advertising = phydev->advertising;

                        advertising = ADVERTISED_TP |
                                      ADVERTISED_Pause |
                                      ADVERTISED_Autoneg |
                                      ADVERTISED_10baseT_Half;

                        if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
                                if (tg3_flag(tp, WOL_SPEED_100MB))
                                        advertising |=
                                                ADVERTISED_100baseT_Half |
                                                ADVERTISED_100baseT_Full |
                                                ADVERTISED_10baseT_Full;
                                else
                                        advertising |= ADVERTISED_10baseT_Full;
                        }

                        phydev->advertising = advertising;

                        phy_start_aneg(phydev);

                        /* Certain Broadcom PHY families need the extra
                         * low-power handling below.
                         */
                        phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
                        if (phyid != PHY_ID_BCMAC131) {
                                phyid &= PHY_BCM_OUI_MASK;
                                if (phyid == PHY_BCM_OUI_1 ||
                                    phyid == PHY_BCM_OUI_2 ||
                                    phyid == PHY_BCM_OUI_3)
                                        do_low_power = true;
                        }
                }
        } else {
                do_low_power = true;

                /* Save the current link settings once. */
                if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
                        tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
                        tp->link_config.orig_speed = tp->link_config.speed;
                        tp->link_config.orig_duplex = tp->link_config.duplex;
                        tp->link_config.orig_autoneg = tp->link_config.autoneg;
                }

                /* Drop copper links to 10/half for low power. */
                if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
                        tp->link_config.speed = SPEED_10;
                        tp->link_config.duplex = DUPLEX_HALF;
                        tp->link_config.autoneg = AUTONEG_ENABLE;
                        tg3_setup_phy(tp, 0);
                }
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                u32 val;

                val = tr32(GRC_VCPU_EXT_CTRL);
                tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
        } else if (!tg3_flag(tp, ENABLE_ASF)) {
                int i;
                u32 val;

                /* Wait (up to ~200ms) for the firmware mailbox to signal
                 * readiness before touching the WOL mailbox.
                 */
                for (i = 0; i < 200; i++) {
                        tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
                        if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
                                break;
                        msleep(1);
                }
        }
        if (tg3_flag(tp, WOL_CAP))
                tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
                                                     WOL_DRV_STATE_SHUTDOWN |
                                                     WOL_DRV_WOL |
                                                     WOL_SET_MAGIC_PKT);

        if (device_should_wake) {
                u32 mac_mode;

                /* Build a MAC mode that keeps the receive path alive for
                 * magic-packet wake-up.
                 */
                if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
                        if (do_low_power &&
                            !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
                                tg3_phy_auxctl_write(tp,
                                               MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
                                               MII_TG3_AUXCTL_PCTL_WOL_EN |
                                               MII_TG3_AUXCTL_PCTL_100TX_LPWR |
                                               MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
                                udelay(40);
                        }

                        if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
                                mac_mode = MAC_MODE_PORT_MODE_GMII;
                        else
                                mac_mode = MAC_MODE_PORT_MODE_MII;

                        mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
                            ASIC_REV_5700) {
                                u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
                                             SPEED_100 : SPEED_10;
                                if (tg3_5700_link_polarity(tp, speed))
                                        mac_mode |= MAC_MODE_LINK_POLARITY;
                                else
                                        mac_mode &= ~MAC_MODE_LINK_POLARITY;
                        }
                } else {
                        mac_mode = MAC_MODE_PORT_MODE_TBI;
                }

                if (!tg3_flag(tp, 5750_PLUS))
                        tw32(MAC_LED_CTRL, tp->led_ctrl);

                mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
                if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
                    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
                        mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

                if (tg3_flag(tp, ENABLE_APE))
                        mac_mode |= MAC_MODE_APE_TX_EN |
                                    MAC_MODE_APE_RX_EN |
                                    MAC_MODE_TDE_ENABLE;

                tw32_f(MAC_MODE, mac_mode);
                udelay(100);

                tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
                udelay(10);
        }

        /* Gate chip clocks; exactly which bits are safe varies by ASIC. */
        if (!tg3_flag(tp, WOL_SPEED_100MB) &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
                u32 base_val;

                base_val = tp->pci_clock_ctrl;
                base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
                             CLOCK_CTRL_TXCLK_DISABLE);

                tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
                            CLOCK_CTRL_PWRDOWN_PLL133, 40);
        } else if (tg3_flag(tp, 5780_CLASS) ||
                   tg3_flag(tp, CPMU_PRESENT) ||
                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                /* do nothing */
        } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
                u32 newbits1, newbits2;

                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                        newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
                                    CLOCK_CTRL_TXCLK_DISABLE |
                                    CLOCK_CTRL_ALTCLK);
                        newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
                } else if (tg3_flag(tp, 5705_PLUS)) {
                        newbits1 = CLOCK_CTRL_625_CORE;
                        newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
                } else {
                        newbits1 = CLOCK_CTRL_ALTCLK;
                        newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
                }

                /* Apply the clock changes in two steps, 40us apart. */
                tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
                            40);

                tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
                            40);

                if (!tg3_flag(tp, 5705_PLUS)) {
                        u32 newbits3;

                        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                                newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
                                            CLOCK_CTRL_TXCLK_DISABLE |
                                            CLOCK_CTRL_44MHZ_CORE);
                        } else {
                                newbits3 = CLOCK_CTRL_44MHZ_CORE;
                        }

                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    tp->pci_clock_ctrl | newbits3, 40);
                }
        }

        /* PHY power can only be cut if nothing needs the link anymore. */
        if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
                tg3_power_down_phy(tp, do_low_power);

        tg3_frob_aux_power(tp, true);

        /* Workaround for unstable PLL clock */
        if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
            (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
                u32 val = tr32(0x7d00);

                val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
                tw32(0x7d00, val);
                if (!tg3_flag(tp, ENABLE_ASF)) {
                        int err;

                        /* Take the NVRAM lock before halting the RX CPU,
                         * in case bootcode is still running (same pattern
                         * as tg3_load_firmware_cpu()).
                         */
                        err = tg3_nvram_lock(tp);
                        tg3_halt_cpu(tp, RX_CPU_BASE);
                        if (!err)
                                tg3_nvram_unlock(tp);
                }
        }

        tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

        return 0;
}
3774
/* Full power-down: run the prepare sequence (always returns 0 here),
 * arm PCI wake-up if WOL is enabled, then enter D3hot.
 */
static void tg3_power_down(struct tg3 *tp)
{
        tg3_power_down_prepare(tp);

        pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
        pci_set_power_state(tp->pdev, PCI_D3hot);
}
3782
3783 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3784 {
3785         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3786         case MII_TG3_AUX_STAT_10HALF:
3787                 *speed = SPEED_10;
3788                 *duplex = DUPLEX_HALF;
3789                 break;
3790
3791         case MII_TG3_AUX_STAT_10FULL:
3792                 *speed = SPEED_10;
3793                 *duplex = DUPLEX_FULL;
3794                 break;
3795
3796         case MII_TG3_AUX_STAT_100HALF:
3797                 *speed = SPEED_100;
3798                 *duplex = DUPLEX_HALF;
3799                 break;
3800
3801         case MII_TG3_AUX_STAT_100FULL:
3802                 *speed = SPEED_100;
3803                 *duplex = DUPLEX_FULL;
3804                 break;
3805
3806         case MII_TG3_AUX_STAT_1000HALF:
3807                 *speed = SPEED_1000;
3808                 *duplex = DUPLEX_HALF;
3809                 break;
3810
3811         case MII_TG3_AUX_STAT_1000FULL:
3812                 *speed = SPEED_1000;
3813                 *duplex = DUPLEX_FULL;
3814                 break;
3815
3816         default:
3817                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3818                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3819                                  SPEED_10;
3820                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3821                                   DUPLEX_HALF;
3822                         break;
3823                 }
3824                 *speed = SPEED_INVALID;
3825                 *duplex = DUPLEX_INVALID;
3826                 break;
3827         }
3828 }
3829
/* Program the PHY autonegotiation advertisement registers from the
 * ethtool-style @advertise mask and @flowctrl bits, including the EEE
 * advertisement on EEE-capable chips.  Returns 0 or a PHY access error.
 */
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
        int err = 0;
        u32 val, new_adv;

        /* 10/100 advertisement plus pause bits. */
        new_adv = ADVERTISE_CSMA;
        new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
        new_adv |= mii_advertise_flowctrl(flowctrl);

        err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
        if (err)
                goto done;

        if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
                new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);

                /* 5701 A0/B0 also force master mode in the 1000BASE-T
                 * advertisement.
                 */
                if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
                        new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

                err = tg3_writephy(tp, MII_CTRL1000, new_adv);
                if (err)
                        goto done;
        }

        if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
                goto done;

        /* Disable LPI while (re)configuring the EEE advertisement. */
        tw32(TG3_CPMU_EEE_MODE,
             tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

        err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
        if (!err) {
                u32 err2;

                val = 0;
                /* Advertise 100-BaseTX EEE ability */
                if (advertise & ADVERTISED_100baseT_Full)
                        val |= MDIO_AN_EEE_ADV_100TX;
                /* Advertise 1000-BaseT EEE ability */
                if (advertise & ADVERTISED_1000baseT_Full)
                        val |= MDIO_AN_EEE_ADV_1000T;
                err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
                if (err)
                        val = 0;

                /* Per-ASIC DSP fixups tied to the EEE advertisement. */
                switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
                case ASIC_REV_5717:
                case ASIC_REV_57765:
                case ASIC_REV_57766:
                case ASIC_REV_5719:
                        /* If we advertised any eee advertisements above... */
                        if (val)
                                val = MII_TG3_DSP_TAP26_ALNOKO |
                                      MII_TG3_DSP_TAP26_RMRXSTO |
                                      MII_TG3_DSP_TAP26_OPCSINPT;
                        tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
                        /* Fall through */
                case ASIC_REV_5720:
                        if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
                                tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
                                                 MII_TG3_DSP_CH34TP2_HIBW01);
                }

                /* Restore SMDSP access; report the first error seen. */
                err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
                if (!err)
                        err = err2;
        }

done:
        return err;
}
3902
/* Configure a copper PHY according to tp->link_config: restricted
 * advertisement in low-power mode, full advertisement when no speed is
 * forced, or a directly programmed BMCR for forced speed/duplex.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
        u32 new_adv;
        int i;

        if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
                /* Low-power: advertise only 10Mb modes (plus 100Mb when
                 * WOL at 100Mb is configured) with full flow control.
                 */
                new_adv = ADVERTISED_10baseT_Half |
                          ADVERTISED_10baseT_Full;
                if (tg3_flag(tp, WOL_SPEED_100MB))
                        new_adv |= ADVERTISED_100baseT_Half |
                                   ADVERTISED_100baseT_Full;

                tg3_phy_autoneg_cfg(tp, new_adv,
                                    FLOW_CTRL_TX | FLOW_CTRL_RX);
        } else if (tp->link_config.speed == SPEED_INVALID) {
                /* No forced speed: advertise the configured mask, with
                 * gigabit modes stripped on 10/100-only PHYs.
                 */
                if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
                        tp->link_config.advertising &=
                                ~(ADVERTISED_1000baseT_Half |
                                  ADVERTISED_1000baseT_Full);

                tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
                                    tp->link_config.flowctrl);
        } else {
                /* Asking for a specific link mode. */
                if (tp->link_config.speed == SPEED_1000) {
                        if (tp->link_config.duplex == DUPLEX_FULL)
                                new_adv = ADVERTISED_1000baseT_Full;
                        else
                                new_adv = ADVERTISED_1000baseT_Half;
                } else if (tp->link_config.speed == SPEED_100) {
                        if (tp->link_config.duplex == DUPLEX_FULL)
                                new_adv = ADVERTISED_100baseT_Full;
                        else
                                new_adv = ADVERTISED_100baseT_Half;
                } else {
                        if (tp->link_config.duplex == DUPLEX_FULL)
                                new_adv = ADVERTISED_10baseT_Full;
                        else
                                new_adv = ADVERTISED_10baseT_Half;
                }

                tg3_phy_autoneg_cfg(tp, new_adv,
                                    tp->link_config.flowctrl);
        }

        if (tp->link_config.autoneg == AUTONEG_DISABLE &&
            tp->link_config.speed != SPEED_INVALID) {
                u32 bmcr, orig_bmcr;

                /* Forced mode: program BMCR directly. */
                tp->link_config.active_speed = tp->link_config.speed;
                tp->link_config.active_duplex = tp->link_config.duplex;

                bmcr = 0;
                switch (tp->link_config.speed) {
                default:
                case SPEED_10:
                        break;

                case SPEED_100:
                        bmcr |= BMCR_SPEED100;
                        break;

                case SPEED_1000:
                        bmcr |= BMCR_SPEED1000;
                        break;
                }

                if (tp->link_config.duplex == DUPLEX_FULL)
                        bmcr |= BMCR_FULLDPLX;

                if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
                    (bmcr != orig_bmcr)) {
                        /* Force the link down via loopback and wait (up
                         * to ~15ms) for link-down before writing the new
                         * BMCR value.
                         */
                        tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
                        for (i = 0; i < 1500; i++) {
                                u32 tmp;

                                udelay(10);
                                /* Read BMSR twice; latched bits need a
                                 * second read to reflect current state.
                                 */
                                if (tg3_readphy(tp, MII_BMSR, &tmp) ||
                                    tg3_readphy(tp, MII_BMSR, &tmp))
                                        continue;
                                if (!(tmp & BMSR_LSTATUS)) {
                                        udelay(40);
                                        break;
                                }
                        }
                        tg3_writephy(tp, MII_BMCR, bmcr);
                        udelay(40);
                }
        } else {
                tg3_writephy(tp, MII_BMCR,
                             BMCR_ANENABLE | BMCR_ANRESTART);
        }
}
3996
3997 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3998 {
3999         int err;
4000
4001         /* Turn off tap power management. */
4002         /* Set Extended packet length bit */
4003         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4004
4005         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4006         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4007         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4008         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4009         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4010
4011         udelay(40);
4012
4013         return err;
4014 }
4015
4016 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4017 {
4018         u32 advmsk, tgtadv, advertising;
4019
4020         advertising = tp->link_config.advertising;
4021         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4022
4023         advmsk = ADVERTISE_ALL;
4024         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4025                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4026                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4027         }
4028
4029         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4030                 return false;
4031
4032         if ((*lcladv & advmsk) != tgtadv)
4033                 return false;
4034
4035         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4036                 u32 tg3_ctrl;
4037
4038                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4039
4040                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4041                         return false;
4042
4043                 if (tgtadv &&
4044                     (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4045                      tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4046                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4047                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4048                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4049                 } else {
4050                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4051                 }
4052
4053                 if (tg3_ctrl != tgtadv)
4054                         return false;
4055         }
4056
4057         return true;
4058 }
4059
4060 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4061 {
4062         u32 lpeth = 0;
4063
4064         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4065                 u32 val;
4066
4067                 if (tg3_readphy(tp, MII_STAT1000, &val))
4068                         return false;
4069
4070                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4071         }
4072
4073         if (tg3_readphy(tp, MII_LPA, rmtadv))
4074                 return false;
4075
4076         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4077         tp->link_config.rmt_adv = lpeth;
4078
4079         return true;
4080 }
4081
/* Evaluate and (re)establish link on a copper PHY, then program the
 * MAC to match the negotiated speed/duplex.  @force_reset nonzero
 * forces a PHY hard reset before probing.  Returns 0, except that
 * errors from the BCM5401 DSP init path are propagated.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, val;
	u32 lcl_adv, rmt_adv;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	/* Mask MAC events and ack any latched link-state bits before
	 * touching the PHY.
	 */
	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	/* Auto-polling must be off while we drive MDIO by hand. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		/* BMSR link status is latched-low: read twice so the
		 * second read reflects the current state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Double read clears the latched value first. */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !tg3_flag(tp, INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			/* Wait up to ~10ms for link to reappear after
			 * the DSP reload.
			 */
			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 rev B0 at gigabit may need a second
			 * reset plus DSP reload when link stays down.
			 */
			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
			    TG3_PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 {A0,B0} CRC bug workaround */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &val);
	tg3_readphy(tp, MII_TG3_ISTAT, &val);

	/* Unmask only the link-change interrupt when using the MI
	 * interrupt; otherwise mask everything (non-FET PHYs).
	 */
	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;
	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
	tp->link_config.rmt_adv = 0;

	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
		/* Ensure bit 10 of the MISCTEST shadow register is
		 * set; if we had to set it, skip straight to relink.
		 */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					  &val);
		if (!err && !(val & (1 << 10))) {
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
					     val | (1 << 10));
			goto relink;
		}
	}

	/* Poll for link (latched BMSR, hence the double read). */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait for the AUX status register to report a
		 * nonzero (settled) value.
		 */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Re-read BMCR until a stable, plausible value shows
		 * up (0 and 0x7fff are treated as bogus).
		 */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		lcl_adv = 0;
		rmt_adv = 0;

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			/* Autoneg: link is good only if the PHY is
			 * advertising what we asked for and partner
			 * ability can be fetched.
			 */
			if ((bmcr & BMCR_ANENABLE) &&
			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
				current_link_up = 1;
		} else {
			/* Forced mode: require an exact match with the
			 * requested speed/duplex/flow control.
			 */
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex &&
			    tp->link_config.flowctrl ==
			    tp->link_config.active_flowctrl) {
				current_link_up = 1;
			}
		}

		if (current_link_up == 1 &&
		    tp->link_config.active_duplex == DUPLEX_FULL) {
			u32 reg, bit;

			/* Record MDI-X state; FET PHYs report it in a
			 * different register.
			 */
			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
				reg = MII_TG3_FET_GEN_STAT;
				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
			} else {
				reg = MII_TG3_EXT_STAT;
				bit = MII_TG3_EXT_STAT_MDIX;
			}

			if (!tg3_readphy(tp, reg, &val) && (val & bit))
				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;

			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
		}
	}

relink:
	if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
		/* (Re)program the PHY for negotiation/forced mode. */
		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
			current_link_up = 1;
	}

	/* Program the MAC port mode to match the link speed. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
	else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if (current_link_up == 1 &&
		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
		else
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tg3_phy_eee_adjust(tp, current_link_up);

	if (tg3_flag(tp, USE_LINKCHG_REG)) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* 5700 at gigabit on PCI-X / high-speed PCI: write MAGIC2 to
	 * the firmware mailbox (presumably a firmware workaround
	 * handshake — consumed by on-chip boot code; not visible here).
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Prevent send BD corruption. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		u16 oldlnkctl, newlnkctl;

		/* CLKREQ is disabled at 10/100, enabled otherwise. */
		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				     &oldlnkctl);
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
		else
			newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
		if (newlnkctl != oldlnkctl)
			pci_write_config_word(tp->pdev,
					      pci_pcie_cap(tp->pdev) +
					      PCI_EXP_LNKCTL, newlnkctl);
	}

	/* Propagate carrier transitions to the network stack. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
4373
/* Software state for the fiber (1000BASE-X style) autonegotiation
 * state machine driven by tg3_fiber_aneg_smachine().
 */
struct tg3_fiber_aneginfo {
	int state;			/* current ANEG_STATE_* value */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

	u32 flags;			/* MR_* control/status bits below */
#define MR_AN_ENABLE            0x00000001	/* run autonegotiation */
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004	/* negotiation finished */
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
/* MR_LP_ADV_*: abilities decoded from the link partner's config word */
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

	/* Timestamps in state-machine ticks (cur_time is incremented
	 * once per tg3_fiber_aneg_smachine() invocation).
	 */
	unsigned long link_time, cur_time;

	u32 ability_match_cfg;		/* last config word received */
	int ability_match_count;	/* consecutive repeats of that word */

	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;		/* tx/rx config words (ANEG_CFG_* bits) */
#define ANEG_CFG_NP             0x00000080	/* next page */
#define ANEG_CFG_ACK            0x00000040	/* acknowledge */
#define ANEG_CFG_RF2            0x00000020	/* remote fault 2 */
#define ANEG_CFG_RF1            0x00000010	/* remote fault 1 */
#define ANEG_CFG_PS2            0x00000001	/* asym pause (see smachine) */
#define ANEG_CFG_PS1            0x00008000	/* sym pause (see smachine) */
#define ANEG_CFG_HD             0x00004000	/* half duplex */
#define ANEG_CFG_FD             0x00002000	/* full duplex */
#define ANEG_CFG_INVAL          0x00001f06	/* bits that must be zero */

};
/* Return codes of tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2	/* keep ticking the state machine */
#define ANEG_FAILED     -1

/* Settle time, in ticks, before leaving the restart/complete states. */
#define ANEG_STATE_SETTLE_TIME  10000
4437
/* Advance the software fiber autonegotiation state machine by one
 * tick.  Samples the received config word from the MAC, updates the
 * ability/ack/idle tracking, then runs the current state's action.
 * Returns ANEG_OK, ANEG_DONE, ANEG_TIMER_ENAB (caller should keep
 * ticking) or ANEG_FAILED.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	/* First invocation: zero all tracking state. */
	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	/* Sample the incoming config word.  ability_match is set once
	 * the partner has sent the same word more than once in a row;
	 * ack_match tracks its ACK bit.
	 */
	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* No config word being received: line is idle. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch (ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Transmit an all-zero config word to restart. */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		/* Let the restart settle before advertising ability. */
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME)
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		else
			ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Advertise full duplex plus our pause capabilities. */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0)
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Re-send our ability word with the ACK bit set. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				/* Partner's word changed: renegotiate. */
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		/* Reject words with reserved bits set. */
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Decode the partner's ability word into MR_LP_* bits. */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		/* NOTE(review): 0x0008 is an unnamed magic bit in the
		 * received word — presumably the partner's toggle bit;
		 * confirm against the hardware docs.
		 */
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					/* Next-page exchange is not
					 * implemented here: fail.
					 */
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop sending config words and wait for idle. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
4689
4690 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4691 {
4692         int res = 0;
4693         struct tg3_fiber_aneginfo aninfo;
4694         int status = ANEG_FAILED;
4695         unsigned int tick;
4696         u32 tmp;
4697
4698         tw32_f(MAC_TX_AUTO_NEG, 0);
4699
4700         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4701         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4702         udelay(40);
4703
4704         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4705         udelay(40);
4706
4707         memset(&aninfo, 0, sizeof(aninfo));
4708         aninfo.flags |= MR_AN_ENABLE;
4709         aninfo.state = ANEG_STATE_UNKNOWN;
4710         aninfo.cur_time = 0;
4711         tick = 0;
4712         while (++tick < 195000) {
4713                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4714                 if (status == ANEG_DONE || status == ANEG_FAILED)
4715                         break;
4716
4717                 udelay(1);
4718         }
4719
4720         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4721         tw32_f(MAC_MODE, tp->mac_mode);
4722         udelay(40);
4723
4724         *txflags = aninfo.txconfig;
4725         *rxflags = aninfo.flags;
4726
4727         if (status == ANEG_DONE &&
4728             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4729                              MR_LP_ADV_FULL_DUPLEX)))
4730                 res = 1;
4731
4732         return res;
4733 }
4734
/* One-time init sequence for the BCM8002 SerDes PHY.  The register
 * numbers and values below are undocumented Broadcom magic; the
 * inline comments describe each step.  Order matters.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if (tg3_flag(tp, INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;		/* already initialized and no link: skip */

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	/* Toggle register 0x11 around its POR value. */
	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
4784
/* Bring up a 1000BASE-X fiber link using the MAC's hardware SG_DIG
 * autoneg engine (5704S-style serdes).  @mac_status is a caller-provided
 * snapshot of MAC_STATUS.  Returns 1 if the link should be treated as
 * up, 0 otherwise.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	/* All revs except 5704 A0/A1 need the MAC_SERDES_CFG workaround
	 * writes below; also determine which of the two ports we are.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: if HW autoneg was previously on, turn it
		 * off and drop back to the common (non-autoneg) setup.
		 */
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			/* Forced link: no pause advertisement either way. */
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	/* Fold our configured flow-control policy into the expected
	 * SG_DIG advertisement bits.
	 */
	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* Control register is stale.  If we are currently up via
		 * parallel detect, keep the link up while the timeout
		 * counter runs down instead of restarting autoneg.
		 */
		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		/* Pulse SOFT_RESET around the new control value to kick
		 * off a fresh autoneg cycle.
		 */
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		/* Re-read status now that we know the control register is
		 * already programmed the way we want it.
		 */
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv = 0, remote_adv = 0;

			/* Translate SG_DIG pause bits into MII-style
			 * advertisement words for flow-control resolution.
			 */
			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			/* Autoneg still pending: give it serdes_counter
			 * timer ticks before trying parallel detection.
			 */
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->phy_flags |=
						TG3_PHYFLG_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No sync and no signal: restart the autoneg timeout. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
4929
/* Bring up a fiber link without the hardware SG_DIG engine, either by
 * running the software autoneg state machine (fiber_autoneg) or by
 * forcing 1000FD.  @mac_status is a caller-provided MAC_STATUS snapshot.
 * Returns 1 if the link should be treated as up, 0 otherwise.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* Without PCS sync there is nothing to negotiate with. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			/* Map the negotiated config words to MII-style
			 * pause advertisement bits.
			 */
			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		/* Ack the sync/config change bits until they stay clear
		 * (bounded at 30 tries).
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Fall back to parallel-detect style link-up: PCS sync
		 * with no config code words being received.
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		/* Briefly send config code words, then return to normal
		 * MAC mode.
		 */
		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
4994
/* Top-level link setup for TBI (fiber) ports.  Chooses between the
 * hardware SG_DIG autoneg path and the by-hand path, updates carrier
 * state, LEDs, and reports link changes.  Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Remember prior link parameters so we can report a change even
	 * when the carrier state itself does not flip.
	 */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: link already up and stable (sync + signal, no
	 * config change pending) - just ack the change bits and return.
	 */
	if (!tg3_flag(tp, HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Switch the MAC into TBI port mode. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	tp->link_config.rmt_adv = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear the stale link-change flag in the status block while
	 * keeping the rest of the status word intact.
	 */
	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack sync/config changes until they stop re-asserting
	 * (bounded at 100 tries).
	 */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		/* Autoneg timed out with no sync: nudge the partner by
		 * pulsing config code words.
		 */
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* Fiber links are always 1000FD when up; drive the LEDs to match. */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		/* Carrier unchanged: still report if speed/duplex/pause
		 * parameters moved.
		 */
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
5103
/* Link setup for serdes ports driven through an MII-style PHY interface
 * (e.g. 5714S/5780-class parts).  Handles autoneg restart, forced mode,
 * parallel-detect bookkeeping, and carrier/flow-control updates.
 * Returns the accumulated tg3_readphy/tg3_writephy error status.
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack any pending MAC status change bits before we start. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;
	tp->link_config.rmt_adv = 0;

	/* BMSR link status is latched-low; read twice to get the
	 * current value.
	 */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	/* 5714 quirk: trust the MAC's TX status for link, not BMSR. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, newadv;

		/* Rebuild the 1000BASE-X advertisement from our
		 * configured flow control and advertised modes.
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				 ADVERTISE_1000XPAUSE |
				 ADVERTISE_1000XPSE_ASYM |
				 ADVERTISE_SLCT);

		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);

		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
			/* Advertisement changed (or autoneg was off):
			 * restart autoneg and let the timer-driven path
			 * pick up the result.
			 */
			tg3_writephy(tp, MII_ADVERTISE, newadv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: build the BMCR we want. */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Latched-low BMSR: read twice (see above). */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			/* Resolve duplex from the intersection of both
			 * sides' advertisements.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = 0;
			}
		}
	}

	/* Pause frames only make sense on a full-duplex link. */
	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
5275
/* Periodic (timer-driven) parallel-detection helper for MII serdes
 * ports.  If autoneg never completes but we see signal and no config
 * code words, force the link up; if config words later appear while we
 * are up via parallel detect, re-enable autoneg.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
					 MII_TG3_DSP_EXP1_INT_STAT);
			/* Read twice; the first read returns stale data.
			 * NOTE(review): presumed latched/stale semantics -
			 * mirrors the double BMSR reads elsewhere.
			 */
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (netif_carrier_ok(tp->dev) &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				 MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

		}
	}
}
5335
/* Top-level PHY/link setup dispatcher.  Picks the fiber, fiber-MII, or
 * copper path based on phy_flags, then applies chip-specific fixups:
 * 5784_AX GRC prescaler, MAC TX slot time, stats coalescing, and the
 * ASPM power-management threshold workaround.  Returns the error code
 * from the chosen setup routine.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		u32 scale;

		/* Derive the GRC prescaler from the current MAC clock. */
		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	/* 5720 keeps extra jumbo-frame/countdown fields in MAC_TX_LENGTHS;
	 * preserve them.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	/* Gigabit half duplex needs the longer (0xff) slot time. */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	/* Only coalesce statistics while the link is up. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	/* ASPM workaround: use a lower L1 entry threshold while the link
	 * is down, the maximum while it is up.
	 */
	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
5400
5401 static inline int tg3_irq_sync(struct tg3 *tp)
5402 {
5403         return tp->irq_sync;
5404 }
5405
5406 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5407 {
5408         int i;
5409
5410         dst = (u32 *)((u8 *)dst + off);
5411         for (i = 0; i < len; i += sizeof(u32))
5412                 *dst++ = tr32(off + i);
5413 }
5414
/* Snapshot the legacy (non-PCIe) register blocks into the caller's dump
 * buffer.  Each tg3_rd32_loop() call copies one hardware block at its
 * native offset; conditional blocks are gated on the relevant chip
 * capability flags.
 */
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	/* PCI config shadow, mailboxes, MAC, and send-data engines. */
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	/* Receive-side engines. */
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	/* Per-vector coalescing registers exist only with MSI-X. */
	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	/* Memory arbiter, buffer manager, DMA engines, and RX CPU. */
	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	/* Pre-5705 parts have a separate TX CPU. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	/* GRC mailboxes, flow-through queues, message interrupts, DMA
	 * completion, and general control.
	 */
	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
5464
/* Emergency diagnostic dump: log all non-zero device registers plus the
 * hardware and software status for every NAPI/interrupt vector.  Safe to
 * call from atomic context (uses GFP_ATOMIC); silently degrades to no
 * register dump if the allocation fails.
 */
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs) {
		netdev_err(tp->dev, "Failed allocating register dump buffer\n");
		return;
	}

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	/* Print four words per line, skipping all-zero groups to keep the
	 * log readable.
	 */
	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		/* Driver-side NAPI bookkeeping for the same vector. */
		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
5522
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* Sanity: we should only get here when NOT already using the
	 * reorder-safe indirect mailbox path.
	 */
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	/* Flag the pending recovery under tp->lock; the actual chip
	 * reset happens later in the workqueue.
	 */
	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
}
5544
5545 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5546 {
5547         /* Tell compiler to fetch tx indices from memory. */
5548         barrier();
5549         return tnapi->tx_pending -
5550                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5551 }
5552
5553 /* Tigon3 never reports partial packet sends.  So we do not
5554  * need special logic to handle SKBs that have not had all
5555  * of their frags sent yet, like SunGEM does.
5556  */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	/* Hardware's view of how far transmission has completed. */
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	/* With TSS the TX queue index is one less than the NAPI index
	 * (vector 0 presumably carries no TX ring - TODO confirm).
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	/* Walk the ring from our consumer index up to the hardware's. */
	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completed slot with no skb means the completion
		 * indices are bogus - suspected MMIO write re-ordering.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		/* Unmap the linear part of the skb (first descriptor). */
		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		/* Skip over any additional descriptor slots flagged as
		 * fragmented for this buffer.
		 */
		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		/* Unmap each page fragment of the skb. */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			/* Running past hw_idx or into an occupied slot
			 * mid-packet also indicates bogus completions.
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		pkts_compl++;
		bytes_compl += skb->len;

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	/* Report completions for byte queue limit accounting. */
	netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Wake the queue if it was stopped and enough space has been
	 * freed; re-check under the tx lock to close the race with the
	 * xmit path stopping the queue concurrently.
	 */
	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
5646
5647 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5648 {
5649         if (!ri->data)
5650                 return;
5651
5652         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5653                          map_sz, PCI_DMA_FROMDEVICE);
5654         kfree(ri->data);
5655         ri->data = NULL;
5656 }
5657
/* Returns the size of the newly allocated RX data buffer, or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			    u32 opaque_key, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	u8 *data;
	dma_addr_t mapping;
	int skb_size, data_size, dest_idx;

	/* Select the target ring (standard or jumbo) and the amount of
	 * buffer space the chip may DMA into.
	 */
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	/* Size the allocation so the buffer can later become an skb via
	 * build_skb() in tg3_rx(): DMA area plus skb_shared_info room.
	 */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc(skb_size, GFP_ATOMIC);
	if (!data)
		return -ENOMEM;

	mapping = pci_map_single(tp->pdev,
				 data + TG3_RX_OFFSET(tp),
				 data_size,
				 PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping)) {
		kfree(data);
		return -EIO;
	}

	/* Commit: record the buffer and publish its DMA address to the
	 * chip through the ring descriptor.
	 */
	map->data = data;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	/* Return the DMA-mapped size, not the kmalloc'ed skb_size. */
	return data_size;
}
5726
5727 /* We only need to move over in the address because the other
5728  * members of the RX descriptor are invariant.  See notes above
5729  * tg3_alloc_rx_data for full details.
5730  */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	/* Buffers are always recycled out of NAPI context 0's rings. */
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	/* Locate matching source/destination slots in the standard or
	 * jumbo ring; unknown keys are silently ignored.
	 */
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	/* Move the buffer pointer and its DMA address to the new slot. */
	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	/* NULL ->data marks the source slot free; readers such as
	 * tg3_rx_prodring_xfer() test this field.
	 */
	src_map->data = NULL;
}
5776
5777 /* The RX ring scheme is composed of multiple rings which post fresh
5778  * buffers to the chip, and one special ring the chip uses to report
5779  * status back to the host.
5780  *
5781  * The special ring reports the status of received packets to the
5782  * host.  The chip does not write into the original descriptor the
5783  * RX buffer was obtained from.  The chip simply takes the original
5784  * descriptor as provided by the host, updates the status and length
5785  * field, then writes this into the next status ring entry.
5786  *
5787  * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
5789  * it is first placed into the on-chip ram.  When the packet's length
5790  * is known, it walks down the TG3_BDINFO entries to select the ring.
5791  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
5792  * which is within the range of the new packet's length is chosen.
5793  *
5794  * The "separate ring for rx status" scheme may sound queer, but it makes
5795  * sense from a cache coherency perspective.  If only the host writes
5796  * to the buffer post rings, and only the chip writes to the rx status
5797  * rings, then cache lines never move beyond shared-modified state.
5798  * If both the host and chip were to write into the same ring, cache line
5799  * eviction could occur since both entities want it in an exclusive state.
5800  */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	/* Work on local copies of the producer indices; they are
	 * published back to tpr (and the chip) when we are done.
	 */
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;
		u8 *data;

		/* The opaque cookie identifies which producer ring (and
		 * which slot in it) this packet's buffer came from.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		/* Drop errored frames, except the odd-nibble MII
		 * indication which is tolerated here.
		 */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			/* Give the buffer back to the chip unchanged. */
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		prefetch(data + TG3_RX_OFFSET(tp));
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;

			/* Large packet: hand the existing buffer up the
			 * stack and post a freshly allocated replacement.
			 */
			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
						    *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			skb = build_skb(data);
			if (!skb) {
				kfree(data);
				goto drop_it_no_recycle;
			}
			skb_reserve(skb, TG3_RX_OFFSET(tp));
			/* Ensure that the update to the data happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->data = NULL;

		} else {
			/* Small packet: copy into a fresh skb and recycle
			 * the original DMA buffer back to the chip.
			 */
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			skb = netdev_alloc_skb(tp->dev,
					       len + TG3_RAW_IP_ALIGN);
			if (skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(skb->data,
			       data + TG3_RX_OFFSET(tp),
			       len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
		}

		skb_put(skb, len);
		/* Trust the hardware checksum only when the chip flagged
		 * the packet and reports the expected 0xffff result.
		 */
		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		/* Drop frames that exceed the MTU unless they are
		 * VLAN-tagged (ETH_P_8021Q).
		 */
		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb,
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically publish the standard producer index so
		 * the chip is not starved of buffers on long bursts.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		/* With RSS, napi[1] hands these buffers back to the chip;
		 * see tg3_poll_work().
		 */
		if (tnapi != &tp->napi[1])
			napi_schedule(&tp->napi[1].napi);
	}

	return received;
}
5987
/* Handle a link-change indication from the status block, if any. */
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Acknowledge the link-change bit in the status
			 * block before acting on it.
			 */
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				/* phylib manages the link; just clear the
				 * MAC status bits here.
				 */
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}
}
6011
/* Transfer RX buffers from a source producer ring set (spr) to a
 * destination set (dpr).  Returns 0, or -ENOSPC if a destination slot
 * was still occupied (the contiguous portion before it is still moved).
 */
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	/* First drain the standard ring. */
	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		/* Contiguous entries available in the source, accounting
		 * for ring wraparound...
		 */
		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		/* ...bounded by contiguous space in the destination. */
		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		/* Stop at the first destination slot still holding a
		 * buffer; report -ENOSPC but copy what fits.
		 */
		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		/* Move the software bookkeeping and the hardware
		 * descriptor DMA addresses together.
		 */
		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	/* Then the jumbo ring, using the same scheme. */
	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
6137
/* Core per-vector work: TX reclaim, RX processing, and (with RSS on
 * vector 1) redistribution of RX buffers back to the chip's rings.
 * Returns the updated work_done count.
 */
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		/* TX recovery pending means a chip reset is coming; no
		 * point in doing further work now.
		 */
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	/* With RSS, NAPI context 1 refills context 0's producer rings
	 * from all per-vector rings and updates the hardware mailboxes.
	 */
	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		for (i = 1; i < tp->irq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		/* Descriptor updates must reach memory before the
		 * mailbox writes below make them visible to the chip.
		 */
		wmb();

		/* Only write a mailbox if its producer index moved. */
		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		/* On a transfer error (destination ring full), kick the
		 * host coalescing engine - presumably to get polled again
		 * soon and retry; confirm against HOSTCC docs.
		 */
		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
6184
6185 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6186 {
6187         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6188                 schedule_work(&tp->reset_task);
6189 }
6190
/* Synchronously cancel any scheduled reset task.  The PENDING flag is
 * cleared only after cancel_work_sync() returns, so the work cannot be
 * re-queued while it may still be running.
 */
static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
6196
/* NAPI poll handler for the non-zero MSI-X vectors (registered in
 * tg3_napi_init()).  Loops until the budget is exhausted or no RX/TX
 * work remains, then re-enables the vector's interrupt.
 */
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
6240
/* Inspect the chip's error status registers after the status block has
 * signalled SD_STATUS_ERROR, and schedule a chip reset for real errors.
 */
static void tg3_process_error(struct tg3 *tp)
{
	u32 val;
	bool real_error = false;

	/* Process only one error; ERROR_PROCESSED is presumably cleared
	 * by the reset path - confirm against tg3_reset_task().
	 */
	if (tg3_flag(tp, ERROR_PROCESSED))
		return;

	/* Check Flow Attention register */
	val = tr32(HOSTCC_FLOW_ATTN);
	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
		real_error = true;
	}

	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
		real_error = true;
	}

	/* MBUF low-watermark alone (or MSI_REQ alone) is benign. */
	if (!real_error)
		return;

	tg3_dump_state(tp);

	tg3_flag_set(tp, ERROR_PROCESSED);
	tg3_reset_task_schedule(tp);
}
6274
/* NAPI poll handler for vector 0 (registered in tg3_napi_init()).
 * Unlike tg3_poll_msix(), this one also handles chip errors and link
 * change events, and supports both tagged and non-tagged status modes.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		/* A pending TX recovery means a chip reset is queued. */
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
6322
6323 static void tg3_napi_disable(struct tg3 *tp)
6324 {
6325         int i;
6326
6327         for (i = tp->irq_cnt - 1; i >= 0; i--)
6328                 napi_disable(&tp->napi[i].napi);
6329 }
6330
6331 static void tg3_napi_enable(struct tg3 *tp)
6332 {
6333         int i;
6334
6335         for (i = 0; i < tp->irq_cnt; i++)
6336                 napi_enable(&tp->napi[i].napi);
6337 }
6338
6339 static void tg3_napi_init(struct tg3 *tp)
6340 {
6341         int i;
6342
6343         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6344         for (i = 1; i < tp->irq_cnt; i++)
6345                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6346 }
6347
6348 static void tg3_napi_fini(struct tg3 *tp)
6349 {
6350         int i;
6351
6352         for (i = 0; i < tp->irq_cnt; i++)
6353                 netif_napi_del(&tp->napi[i].napi);
6354 }
6355
/* Stop all TX activity: quiesce NAPI and freeze the TX queues. */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies; /* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_tx_disable(tp->dev);
}
6362
/* Restart TX activity after tg3_netif_stop()/tg3_irq_quiesce(). */
static inline void tg3_netif_start(struct tg3 *tp)
{
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	tg3_napi_enable(tp);
	/* Force a pass through tg3_poll() so any work that accumulated
	 * while interrupts were off gets picked up immediately.
	 */
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
6375
/* Block the interrupt handlers from scheduling NAPI and wait for any
 * in-flight handlers to finish.  Undone by clearing tp->irq_sync
 * elsewhere (tg3_enable_ints path).
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	/* Make the irq_sync store visible before waiting; the ISRs test
	 * it via tg3_irq_sync() and bail out without napi_schedule().
	 */
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}
6388
6389 /* Fully shutdown all tg3 driver activity elsewhere in the system.
6390  * If irq_sync is non-zero, then the IRQ handler must be synchronized
6391  * with as well.  Most of the time, this is not necessary except when
6392  * shutting down the device.
6393  */
/* Take tp->lock (BH-safe), optionally quiescing the IRQ handlers
 * first.  See the comment above: irq_sync is only needed when the
 * device is being shut down.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
6400
/* Release the lock taken by tg3_full_lock(). */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
6405
6406 /* One-shot MSI handler - Chip automatically disables interrupt
6407  * after sending MSI so driver doesn't have to do it.
6408  */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	/* Warm the cache lines tg3_poll_work() will touch first. */
	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	/* Skip scheduling if tg3_irq_quiesce() is in progress. */
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}
6423
6424 /* MSI ISR - No need to check for interrupt sharing and no need to
6425  * flush status block and interrupt mailbox. PCI ordering rules
6426  * guarantee that MSI will arrive after the status block.
6427  */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	/* Warm the cache lines tg3_poll_work() will touch first. */
	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
}
6449
/* Legacy INTx interrupt handler (non-tagged status mode). */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
6498
/* Legacy INTx interrupt handler for chips using tagged status mode,
 * where the status_tag field (rather than SD_STATUS_UPDATED) tracks
 * whether new work has been posted.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
6550
6551 /* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	/* Claim the interrupt if the status block was updated or the
	 * chip reports INTA asserted; disable further interrupts so a
	 * single test interrupt is observed.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
6565
6566 #ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: simulate an interrupt on every vector so the
 * stack can poll the device with normal interrupts unavailable.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
6575 #endif
6576
/* TX watchdog callback: log state (if enabled) and schedule a full
 * chip reset from process context.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	tg3_reset_task_schedule(tp);
}
6588
6589 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6590 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6591 {
6592         u32 base = (u32) mapping & 0xffffffff;
6593
6594         return (base > 0xffffdcc0) && (base + len + 8 < base);
6595 }
6596
6597 /* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	/* Only chips with the 40-bit DMA erratum need the check; and
	 * only 64-bit highmem configs can hand out such addresses.
	 */
	if (tg3_flag(tp, 40BIT_DMA_BUG))
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
	return 0;
#else
	return 0;
#endif
}
6609
6610 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6611                                  dma_addr_t mapping, u32 len, u32 flags,
6612                                  u32 mss, u32 vlan)
6613 {
6614         txbd->addr_hi = ((u64) mapping >> 32);
6615         txbd->addr_lo = ((u64) mapping & 0xffffffff);
6616         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6617         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6618 }
6619
/* Write one or more TX BDs for a single DMA mapping, applying the
 * chip's DMA bug workarounds.  *entry (ring index) and *budget
 * (remaining free BDs) are advanced/consumed as descriptors are
 * written.  Returns true when the mapping cannot be used safely and
 * the caller must fall back to tigon3_dma_hwbug_workaround().
 */
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	/* Some chips corrupt DMA transfers of 8 bytes or less. */
	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tp->dma_limit) {
		u32 prvidx = *entry;
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		/* Chop the buffer into dma_limit sized chunks, each in
		 * its own BD; only the final chunk keeps TXD_FLAG_END.
		 */
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}

			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				/* Out of BDs; undo the fragmented mark on
				 * the last chunk so unwinding stops there.
				 */
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		/* No split needed: one BD covers the whole mapping. */
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}
6679
/* Unmap the linear head and fragment DMA mappings of the skb whose
 * first BD lives at @entry, walking over any extra BDs created by
 * tg3_tx_frag_set() (marked ->fragmented).  @last is the index of the
 * last skb fragment to unmap, or -1 if only the head was mapped.
 */
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;

	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	/* Skip the extra BDs the head mapping may have been split into. */
	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		/* Again skip any split-BD entries for this fragment. */
		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}
6717
6718 /* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	/* Copy the skb into a fresh linear buffer so the new mapping
	 * (hopefully) avoids the boundary the old one crossed.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		/* 5701 needs the TX buffer 4-byte aligned. */
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb(new_skb);
				ret = -1;
			}
		}
	}

	/* The original skb is always consumed here.
	 * NOTE(review): on the failure paths above, *pskb is left
	 * pointing at an already-freed (or NULL) skb; callers must not
	 * touch it when ret is -1 — confirm all callers honor this.
	 */
	dev_kfree_skb(skb);
	*pskb = new_skb;
	return ret;
}
6772
6773 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6774
6775 /* Use GSO to workaround a rare TSO bug that may be triggered when the
6776  * TSO header is greater than 80 bytes.
6777  */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
		netif_stop_queue(tp->dev);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	/* Segment the skb in software with TSO masked off, then send
	 * each resulting MTU-sized segment through the normal path.
	 */
	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	/* The original over-sized skb is consumed in all cases. */
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
6815
6816 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6817  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6818  */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss, vlan = 0;
	u32 budget;
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	/* With TSS, TX rings start at vector 1 (vector 0 is RX-only). */
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	budget = tg3_tx_avail(tnapi);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;

	/* Non-zero gso_size means this is a TSO frame: set up the
	 * header-offload flags/fields the hardware expects.
	 */
	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		struct iphdr *iph;
		u32 tcp_opt_len, hdr_len;

		/* We modify the IP/TCP headers below, so unshare first. */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;

		if (!skb_is_gso_v6(skb)) {
			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		/* Buggy chips cannot handle TSO headers > 80 bytes;
		 * fall back to software segmentation.
		 */
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
		    tg3_flag(tp, TSO_BUG))
			return tg3_tso_bug(tp, skb);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			/* HW TSO computes the pseudo-header checksum. */
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		/* Encode the header length into mss/base_flags in the
		 * per-generation format the TSO engine expects.
		 */
		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	if (vlan_tx_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = vlan_tx_tag_get(skb);
	}

	/* Map the linear portion of the skb. */
	len = skb_headlen(skb);

	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping))
		goto drop;


	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		/* Without HW TSO, only the first BD carries the mss. */
		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		/* Now loop through additional data
		 * fragments, and queue them.
		 */
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		/* Unwind everything queued so far and retry with a
		 * linearized copy of the skb.
		 */
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);
	netdev_sent_queue(tp->dev, skb->len);

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	mmiowb();
	return NETDEV_TX_OK;

dma_error:
	/* Fragment i failed to map; unmap the head and fragments 0..i-1. */
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}
7029
/* Enable or disable the MAC-level internal loopback path, adjusting
 * the port mode and link polarity bits as the chip generation needs.
 */
static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
	if (enable) {
		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
				  MAC_MODE_PORT_MODE_MASK);

		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

		if (!tg3_flag(tp, 5705_PLUS))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else {
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

		if (tg3_flag(tp, 5705_PLUS) ||
		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	tw32(MAC_MODE, tp->mac_mode);
	/* Give the MAC time to settle after the mode change. */
	udelay(40);
}
7057
/* Put the PHY into loopback at the requested @speed.  @extlpbk selects
 * external loopback (test cable) instead of internal BMCR loopback.
 * Returns 0 on success or -EIO if external loopback setup fails.
 */
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, 0);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	/* Build the BMCR value for the requested speed; FET PHYs top
	 * out at 100 Mbps, others default to 1000.
	 */
	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			/* Force master role so the link comes up against
			 * the loopback plug.
			 */
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	/* Program the MAC port mode to match the loopback speed. */
	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		/* 5401/5411 PHYs need opposite link polarity settings. */
		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}
7150
7151 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7152 {
7153         struct tg3 *tp = netdev_priv(dev);
7154
7155         if (features & NETIF_F_LOOPBACK) {
7156                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7157                         return;
7158
7159                 spin_lock_bh(&tp->lock);
7160                 tg3_mac_loopback(tp, true);
7161                 netif_carrier_on(tp->dev);
7162                 spin_unlock_bh(&tp->lock);
7163                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7164         } else {
7165                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7166                         return;
7167
7168                 spin_lock_bh(&tp->lock);
7169                 tg3_mac_loopback(tp, false);
7170                 /* Force link status check */
7171                 tg3_setup_phy(tp, 1);
7172                 spin_unlock_bh(&tp->lock);
7173                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7174         }
7175 }
7176
7177 static netdev_features_t tg3_fix_features(struct net_device *dev,
7178         netdev_features_t features)
7179 {
7180         struct tg3 *tp = netdev_priv(dev);
7181
7182         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7183                 features &= ~NETIF_F_ALL_TSO;
7184
7185         return features;
7186 }
7187
7188 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7189 {
7190         netdev_features_t changed = dev->features ^ features;
7191
7192         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7193                 tg3_set_loopback(dev, features);
7194
7195         return 0;
7196 }
7197
/* Free all rx buffers attached to a producer ring set.
 *
 * For the default ring set (napi[0]) every slot is walked; for any other
 * ring set only the window between the consumer and producer indices is
 * populated, so only that window is walked.
 */
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	if (tpr != &tp->napi[0].prodring) {
		/* Walk cons -> prod, wrapping with the ring mask. */
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	/* Default ring set: every slot may hold a buffer. */
	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				tp->rx_pkt_map_sz);

	/* 5780-class chips carry jumbo frames on the standard ring (see
	 * tg3_rx_prodring_alloc()), so the dedicated jumbo ring exists only
	 * on the other jumbo-capable chips.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					TG3_RX_JMB_MAP_SZ);
	}
}
7231
7232 /* Initialize rx rings for packet processing.
7233  *
7234  * The chip has been shut down and the driver detached from
7235  * the networking, so no interrupts or new tx packets will
7236  * end up in the driver.  tp->{tx,}lock are held and thus
7237  * we may not sleep.
7238  */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	/* Non-default ring sets need only their buffer bookkeeping arrays
	 * cleared; descriptors and SKBs are managed via the default set.
	 */
	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	/* 5780-class chips carry jumbo frames on the standard ring, so the
	 * per-packet DMA size must grow when a jumbo MTU is configured.
	 */
	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			/* Zero buffers is fatal; otherwise run with the
			 * smaller ring that could be populated.
			 */
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	/* Same invariant setup as above, but for the dedicated jumbo ring. */
	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
		       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
7334
7335 static void tg3_rx_prodring_fini(struct tg3 *tp,
7336                                  struct tg3_rx_prodring_set *tpr)
7337 {
7338         kfree(tpr->rx_std_buffers);
7339         tpr->rx_std_buffers = NULL;
7340         kfree(tpr->rx_jmb_buffers);
7341         tpr->rx_jmb_buffers = NULL;
7342         if (tpr->rx_std) {
7343                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7344                                   tpr->rx_std, tpr->rx_std_mapping);
7345                 tpr->rx_std = NULL;
7346         }
7347         if (tpr->rx_jmb) {
7348                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7349                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
7350                 tpr->rx_jmb = NULL;
7351         }
7352 }
7353
/* Allocate the shadow buffer arrays and coherent descriptor rings for one
 * producer ring set.  Returns 0 on success or -ENOMEM after cleaning up
 * any partial allocations via tg3_rx_prodring_fini().
 */
static int tg3_rx_prodring_init(struct tg3 *tp,
				struct tg3_rx_prodring_set *tpr)
{
	/* Software state (SKB pointers, DMA mappings) per standard slot. */
	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
				      GFP_KERNEL);
	if (!tpr->rx_std_buffers)
		return -ENOMEM;

	/* Hardware-visible standard descriptor ring. */
	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
					 TG3_RX_STD_RING_BYTES(tp),
					 &tpr->rx_std_mapping,
					 GFP_KERNEL);
	if (!tpr->rx_std)
		goto err_out;

	/* A dedicated jumbo ring exists only on jumbo-capable chips that
	 * are not 5780-class (those reuse the standard ring).
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
					      GFP_KERNEL);
		if (!tpr->rx_jmb_buffers)
			goto err_out;

		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
						 TG3_RX_JMB_RING_BYTES(tp),
						 &tpr->rx_jmb_mapping,
						 GFP_KERNEL);
		if (!tpr->rx_jmb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_rx_prodring_fini(tp, tpr);
	return -ENOMEM;
}
7389
7390 /* Free up pending packets in all rx/tx rings.
7391  *
7392  * The chip has been shut down and the driver detached from
7393  * the networking, so no interrupts or new tx packets will
7394  * end up in the driver.  tp->{tx,}lock is not held and we are not
7395  * in an interrupt context and thus may sleep.
7396  */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		/* Vectors without a tx ring have no tx buffers to reap. */
		if (!tnapi->tx_buffers)
			continue;

		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
			struct sk_buff *skb = tnapi->tx_buffers[i].skb;

			if (!skb)
				continue;

			/* Unmap the skb head plus all of its fragments. */
			tg3_tx_skb_unmap(tnapi, i,
					 skb_shinfo(skb)->nr_frags - 1);

			dev_kfree_skb_any(skb);
		}
	}
	netdev_reset_queue(tp->dev);
}
7423
7424 /* Initialize tx/rx rings for packet processing.
7425  *
7426  * The chip has been shut down and the driver detached from
7427  * the networking, so no interrupts or new tx packets will
7428  * end up in the driver.  tp->{tx,}lock are held and thus
7429  * we may not sleep.
7430  */
7431 static int tg3_init_rings(struct tg3 *tp)
7432 {
7433         int i;
7434
7435         /* Free up all the SKBs. */
7436         tg3_free_rings(tp);
7437
7438         for (i = 0; i < tp->irq_cnt; i++) {
7439                 struct tg3_napi *tnapi = &tp->napi[i];
7440
7441                 tnapi->last_tag = 0;
7442                 tnapi->last_irq_tag = 0;
7443                 tnapi->hw_status->status = 0;
7444                 tnapi->hw_status->status_tag = 0;
7445                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7446
7447                 tnapi->tx_prod = 0;
7448                 tnapi->tx_cons = 0;
7449                 if (tnapi->tx_ring)
7450                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7451
7452                 tnapi->rx_rcb_ptr = 0;
7453                 if (tnapi->rx_rcb)
7454                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7455
7456                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7457                         tg3_free_rings(tp);
7458                         return -ENOMEM;
7459                 }
7460         }
7461
7462         return 0;
7463 }
7464
7465 /*
7466  * Must not be invoked with interrupt sources disabled and
7467  * the hardware shutdown down.
7468  */
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	int i;

	/* Tear down per-vector resources in the reverse order of
	 * tg3_alloc_consistent().
	 */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->tx_ring) {
			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
				tnapi->tx_ring, tnapi->tx_desc_mapping);
			tnapi->tx_ring = NULL;
		}

		/* kfree(NULL) is safe, so no guard needed here. */
		kfree(tnapi->tx_buffers);
		tnapi->tx_buffers = NULL;

		if (tnapi->rx_rcb) {
			dma_free_coherent(&tp->pdev->dev,
					  TG3_RX_RCB_RING_BYTES(tp),
					  tnapi->rx_rcb,
					  tnapi->rx_rcb_mapping);
			tnapi->rx_rcb = NULL;
		}

		tg3_rx_prodring_fini(tp, &tnapi->prodring);

		if (tnapi->hw_status) {
			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
					  tnapi->hw_status,
					  tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
	}

	/* Finally, release the device-wide statistics block. */
	if (tp->hw_stats) {
		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
				  tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
7509
7510 /*
7511  * Must not be invoked with interrupt sources disabled and
7512  * the hardware shutdown down.  Can sleep.
7513  */
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	/* Device-wide statistics block, shared by all vectors. */
	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
					  sizeof(struct tg3_hw_stats),
					  &tp->stats_mapping,
					  GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		/* Each vector gets its own status block. */
		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
						      TG3_HW_STATUS_SIZE,
						      &tnapi->status_mapping,
						      GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
		sblk = tnapi->hw_status;

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector TSS is enabled, vector 0 does not handle
		 * tx interrupts.  Don't allocate any resources for it.
		 */
		if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
		    (i && tg3_flag(tp, ENABLE_TSS))) {
			tnapi->tx_buffers = kzalloc(
					       sizeof(struct tg3_tx_ring_info) *
					       TG3_TX_RING_SIZE, GFP_KERNEL);
			if (!tnapi->tx_buffers)
				goto err_out;

			tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
							    TG3_TX_RING_BYTES,
							&tnapi->tx_desc_mapping,
							    GFP_KERNEL);
			if (!tnapi->tx_ring)
				goto err_out;
		}

		/*
		 * When RSS is enabled, the status block format changes
		 * slightly.  The "rx_jumbo_consumer", "reserved",
		 * and "rx_mini_consumer" members get mapped to the
		 * other three rx return ring producer indexes.
		 */
		switch (i) {
		default:
			/* Vectors 0 and 1 use the normal rx producer field. */
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
			break;
		case 2:
			tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
			break;
		case 3:
			tnapi->rx_rcb_prod_idx = &sblk->reserved;
			break;
		case 4:
			tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
			break;
		}

		/*
		 * If multivector RSS is enabled, vector 0 does not handle
		 * rx or tx interrupts.  Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		/* rx return (completion) ring for this vector. */
		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
						   TG3_RX_RCB_RING_BYTES(tp),
						   &tnapi->rx_rcb_mapping,
						   GFP_KERNEL);
		if (!tnapi->rx_rcb)
			goto err_out;

		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	}

	return 0;

err_out:
	/* tg3_free_consistent() safely skips anything not yet allocated. */
	tg3_free_consistent(tp);
	return -ENOMEM;
}
7607
7608 #define MAX_WAIT_CNT 1000
7609
7610 /* To stop a block, clear the enable bit and poll till it
7611  * clears.  tp->lock is held.
7612  */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
{
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	/* Clear the enable bit and flush the posted write. */
	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	/* Poll up to MAX_WAIT_CNT * 100us for the block to stop. */
	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	/* Note: a timeout with silent set still returns success. */
	if (i == MAX_WAIT_CNT && !silent) {
		dev_err(&tp->pdev->dev,
			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
			ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}
7655
7656 /* tp->lock is held. */
/* tp->lock is held.
 *
 * Shut down the MAC and all DMA/buffer-manager blocks in a specific
 * order: interrupts first, then the receive path, then the transmit
 * path, then host coalescing and memory management.  Errors from the
 * individual block stops are OR-ed together and returned.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop accepting new rx traffic at the MAC. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Receive-side blocks. */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* Transmit-side blocks. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Disable the MAC transmitter and poll for it to quiesce. */
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse the FTQ reset. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Wipe all status blocks so stale state is not seen on restart. */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	return err;
}
7719
7720 /* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	/* Only PCI_COMMAND is saved; tg3_restore_pci_state() rebuilds the
	 * rest from state already cached in *tp.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
7725
7726 /* Restore PCI state after chip reset */
/* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Put back the command register captured by tg3_save_pci_state(). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	/* Conventional PCI: restore cache line size and latency timer,
	 * which the reset may have clobbered.
	 */
	if (!tg3_flag(tp, PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
7786
7787 /* tp->lock is held. */
7788 static int tg3_chip_reset(struct tg3 *tp)
7789 {
7790         u32 val;
7791         void (*write_op)(struct tg3 *, u32, u32);
7792         int i, err;
7793
7794         tg3_nvram_lock(tp);
7795
7796         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7797
7798         /* No matching tg3_nvram_unlock() after this because
7799          * chip reset below will undo the nvram lock.
7800          */
7801         tp->nvram_lock_cnt = 0;
7802
7803         /* GRC_MISC_CFG core clock reset will clear the memory
7804          * enable bit in PCI register 4 and the MSI enable bit
7805          * on some chips, so we save relevant registers here.
7806          */
7807         tg3_save_pci_state(tp);
7808
7809         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7810             tg3_flag(tp, 5755_PLUS))
7811                 tw32(GRC_FASTBOOT_PC, 0);
7812
7813         /*
7814          * We must avoid the readl() that normally takes place.
7815          * It locks machines, causes machine checks, and other
7816          * fun things.  So, temporarily disable the 5701
7817          * hardware workaround, while we do the reset.
7818          */
7819         write_op = tp->write32;
7820         if (write_op == tg3_write_flush_reg32)
7821                 tp->write32 = tg3_write32;
7822
7823         /* Prevent the irq handler from reading or writing PCI registers
7824          * during chip reset when the memory enable bit in the PCI command
7825          * register may be cleared.  The chip does not generate interrupt
7826          * at this time, but the irq handler may still be called due to irq
7827          * sharing or irqpoll.
7828          */
7829         tg3_flag_set(tp, CHIP_RESETTING);
7830         for (i = 0; i < tp->irq_cnt; i++) {
7831                 struct tg3_napi *tnapi = &tp->napi[i];
7832                 if (tnapi->hw_status) {
7833                         tnapi->hw_status->status = 0;
7834                         tnapi->hw_status->status_tag = 0;
7835                 }
7836                 tnapi->last_tag = 0;
7837                 tnapi->last_irq_tag = 0;
7838         }
7839         smp_mb();
7840
7841         for (i = 0; i < tp->irq_cnt; i++)
7842                 synchronize_irq(tp->napi[i].irq_vec);
7843
7844         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7845                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7846                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7847         }
7848
7849         /* do the reset */
7850         val = GRC_MISC_CFG_CORECLK_RESET;
7851
7852         if (tg3_flag(tp, PCI_EXPRESS)) {
7853                 /* Force PCIe 1.0a mode */
7854                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7855                     !tg3_flag(tp, 57765_PLUS) &&
7856                     tr32(TG3_PCIE_PHY_TSTCTL) ==
7857                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7858                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7859
7860                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7861                         tw32(GRC_MISC_CFG, (1 << 29));
7862                         val |= (1 << 29);
7863                 }
7864         }
7865
7866         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7867                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7868                 tw32(GRC_VCPU_EXT_CTRL,
7869                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7870         }
7871
7872         /* Manage gphy power for all CPMU absent PCIe devices. */
7873         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7874                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7875
7876         tw32(GRC_MISC_CFG, val);
7877
7878         /* restore 5701 hardware bug workaround write method */
7879         tp->write32 = write_op;
7880
7881         /* Unfortunately, we have to delay before the PCI read back.
7882          * Some 575X chips even will not respond to a PCI cfg access
7883          * when the reset command is given to the chip.
7884          *
7885          * How do these hardware designers expect things to work
7886          * properly if the PCI write is posted for a long period
7887          * of time?  It is always necessary to have some method by
7888          * which a register read back can occur to push the write
7889          * out which does the reset.
7890          *
7891          * For most tg3 variants the trick below was working.
7892          * Ho hum...
7893          */
7894         udelay(120);
7895
7896         /* Flush PCI posted writes.  The normal MMIO registers
7897          * are inaccessible at this time so this is the only
7898          * way to make this reliably (actually, this is no longer
7899          * the case, see above).  I tried to use indirect
7900          * register read/write but this upset some 5701 variants.
7901          */
7902         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7903
7904         udelay(120);
7905
7906         if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7907                 u16 val16;
7908
7909                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7910                         int i;
7911                         u32 cfg_val;
7912
7913                         /* Wait for link training to complete.  */
7914                         for (i = 0; i < 5000; i++)
7915                                 udelay(100);
7916
7917                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7918                         pci_write_config_dword(tp->pdev, 0xc4,
7919                                                cfg_val | (1 << 15));
7920                 }
7921
7922                 /* Clear the "no snoop" and "relaxed ordering" bits. */
7923                 pci_read_config_word(tp->pdev,
7924                                      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7925                                      &val16);
7926                 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7927                            PCI_EXP_DEVCTL_NOSNOOP_EN);
7928                 /*
7929                  * Older PCIe devices only support the 128 byte
7930                  * MPS setting.  Enforce the restriction.
7931                  */
7932                 if (!tg3_flag(tp, CPMU_PRESENT))
7933                         val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7934                 pci_write_config_word(tp->pdev,
7935                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7936                                       val16);
7937
7938                 /* Clear error status */
7939                 pci_write_config_word(tp->pdev,
7940                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7941                                       PCI_EXP_DEVSTA_CED |
7942                                       PCI_EXP_DEVSTA_NFED |
7943                                       PCI_EXP_DEVSTA_FED |
7944                                       PCI_EXP_DEVSTA_URD);
7945         }
7946
7947         tg3_restore_pci_state(tp);
7948
7949         tg3_flag_clear(tp, CHIP_RESETTING);
7950         tg3_flag_clear(tp, ERROR_PROCESSED);
7951
7952         val = 0;
7953         if (tg3_flag(tp, 5780_CLASS))
7954                 val = tr32(MEMARB_MODE);
7955         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7956
7957         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7958                 tg3_stop_fw(tp);
7959                 tw32(0x5000, 0x400);
7960         }
7961
7962         tw32(GRC_MODE, tp->grc_mode);
7963
7964         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7965                 val = tr32(0xc4);
7966
7967                 tw32(0xc4, val | (1 << 15));
7968         }
7969
7970         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7971             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7972                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7973                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7974                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7975                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7976         }
7977
7978         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7979                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7980                 val = tp->mac_mode;
7981         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7982                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7983                 val = tp->mac_mode;
7984         } else
7985                 val = 0;
7986
7987         tw32_f(MAC_MODE, val);
7988         udelay(40);
7989
7990         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7991
7992         err = tg3_poll_fw(tp);
7993         if (err)
7994                 return err;
7995
7996         tg3_mdio_start(tp);
7997
7998         if (tg3_flag(tp, PCI_EXPRESS) &&
7999             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8000             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8001             !tg3_flag(tp, 57765_PLUS)) {
8002                 val = tr32(0x7c00);
8003
8004                 tw32(0x7c00, val | (1 << 25));
8005         }
8006
8007         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8008                 val = tr32(TG3_CPMU_CLCK_ORIDE);
8009                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8010         }
8011
8012         /* Reprobe ASF enable state.  */
8013         tg3_flag_clear(tp, ENABLE_ASF);
8014         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8015         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8016         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8017                 u32 nic_cfg;
8018
8019                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8020                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8021                         tg3_flag_set(tp, ENABLE_ASF);
8022                         tp->last_event_jiffies = jiffies;
8023                         if (tg3_flag(tp, 5750_PLUS))
8024                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8025                 }
8026         }
8027
8028         return 0;
8029 }
8030
8031 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
8032                                                  struct rtnl_link_stats64 *);
8033 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *,
8034                                                 struct tg3_ethtool_stats *);
8035
8036 /* tp->lock is held. */
8037 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8038 {
8039         int err;
8040
8041         tg3_stop_fw(tp);
8042
8043         tg3_write_sig_pre_reset(tp, kind);
8044
8045         tg3_abort_hw(tp, silent);
8046         err = tg3_chip_reset(tp);
8047
8048         __tg3_set_mac_addr(tp, 0);
8049
8050         tg3_write_sig_legacy(tp, kind);
8051         tg3_write_sig_post_reset(tp, kind);
8052
8053         if (tp->hw_stats) {
8054                 /* Save the stats across chip resets... */
8055                 tg3_get_stats64(tp->dev, &tp->net_stats_prev),
8056                 tg3_get_estats(tp, &tp->estats_prev);
8057
8058                 /* And make sure the next sample is new data */
8059                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8060         }
8061
8062         if (err)
8063                 return err;
8064
8065         return 0;
8066 }
8067
8068 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8069 {
8070         struct tg3 *tp = netdev_priv(dev);
8071         struct sockaddr *addr = p;
8072         int err = 0, skip_mac_1 = 0;
8073
8074         if (!is_valid_ether_addr(addr->sa_data))
8075                 return -EINVAL;
8076
8077         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8078
8079         if (!netif_running(dev))
8080                 return 0;
8081
8082         if (tg3_flag(tp, ENABLE_ASF)) {
8083                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8084
8085                 addr0_high = tr32(MAC_ADDR_0_HIGH);
8086                 addr0_low = tr32(MAC_ADDR_0_LOW);
8087                 addr1_high = tr32(MAC_ADDR_1_HIGH);
8088                 addr1_low = tr32(MAC_ADDR_1_LOW);
8089
8090                 /* Skip MAC addr 1 if ASF is using it. */
8091                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8092                     !(addr1_high == 0 && addr1_low == 0))
8093                         skip_mac_1 = 1;
8094         }
8095         spin_lock_bh(&tp->lock);
8096         __tg3_set_mac_addr(tp, skip_mac_1);
8097         spin_unlock_bh(&tp->lock);
8098
8099         return err;
8100 }
8101
8102 /* tp->lock is held. */
8103 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8104                            dma_addr_t mapping, u32 maxlen_flags,
8105                            u32 nic_addr)
8106 {
8107         tg3_write_mem(tp,
8108                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8109                       ((u64) mapping >> 32));
8110         tg3_write_mem(tp,
8111                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8112                       ((u64) mapping & 0xffffffff));
8113         tg3_write_mem(tp,
8114                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8115                        maxlen_flags);
8116
8117         if (!tg3_flag(tp, 5705_PLUS))
8118                 tg3_write_mem(tp,
8119                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8120                               nic_addr);
8121 }
8122
/* Program the host coalescing engine from an ethtool_coalesce request.
 * When TSS/RSS is enabled the vector-0 tx/rx registers are zeroed and the
 * per-vector register banks (VEC1..) carry the settings instead; unused
 * vector slots up to irq_max are cleared.  Caller holds tp->lock.
 */
static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
        int i;

        /* Vector 0 tx coalescing: used directly unless TSS moves tx
         * processing onto the per-vector registers below.
         */
        if (!tg3_flag(tp, ENABLE_TSS)) {
                tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
                tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
                tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
        } else {
                tw32(HOSTCC_TXCOL_TICKS, 0);
                tw32(HOSTCC_TXMAX_FRAMES, 0);
                tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
        }

        /* Vector 0 rx coalescing: same story for RSS. */
        if (!tg3_flag(tp, ENABLE_RSS)) {
                tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
                tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
                tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
        } else {
                tw32(HOSTCC_RXCOL_TICKS, 0);
                tw32(HOSTCC_RXMAX_FRAMES, 0);
                tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
        }

        /* Pre-5705 chips additionally support irq-context tick values and
         * a stats block update timer.
         */
        if (!tg3_flag(tp, 5705_PLUS)) {
                u32 val = ec->stats_block_coalesce_usecs;

                tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
                tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);

                /* No link, no point in DMAing stats updates. */
                if (!netif_carrier_ok(tp->dev))
                        val = 0;

                tw32(HOSTCC_STAT_COAL_TICKS, val);
        }

        /* Per-vector register banks: each extra MSI-X vector's registers
         * live 0x18 bytes apart starting at the VEC1 offsets.
         */
        for (i = 0; i < tp->irq_cnt - 1; i++) {
                u32 reg;

                reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
                tw32(reg, ec->rx_coalesce_usecs);
                reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
                tw32(reg, ec->rx_max_coalesced_frames);
                reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
                tw32(reg, ec->rx_max_coalesced_frames_irq);

                if (tg3_flag(tp, ENABLE_TSS)) {
                        reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
                        tw32(reg, ec->tx_coalesce_usecs);
                        reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
                        tw32(reg, ec->tx_max_coalesced_frames);
                        reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
                        tw32(reg, ec->tx_max_coalesced_frames_irq);
                }
        }

        /* Zero the banks for vectors beyond the active count. */
        for (; i < tp->irq_max - 1; i++) {
                tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
                tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
                tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);

                if (tg3_flag(tp, ENABLE_TSS)) {
                        tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
                        tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
                        tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
                }
        }
}
8191
/* tp->lock is held. */
/* Reset all tx/rx ring state after a chip reset: disable every ring
 * except the first, clear mailbox registers and host status blocks, and
 * reprogram the BDINFO structures for each active napi vector.  Must run
 * after tg3_chip_reset() and before traffic is re-enabled.
 */
static void tg3_rings_reset(struct tg3 *tp)
{
        int i;
        u32 stblk, txrcb, rxrcb, limit;
        struct tg3_napi *tnapi = &tp->napi[0];

        /* Disable all transmit rings but the first.  The number of send
         * ring control blocks present in SRAM depends on the chip family.
         */
        if (!tg3_flag(tp, 5705_PLUS))
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
        else if (tg3_flag(tp, 5717_PLUS))
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
        else if (tg3_flag(tp, 57765_CLASS))
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
        else
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

        for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
             txrcb < limit; txrcb += TG3_BDINFO_SIZE)
                tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
                              BDINFO_FLAGS_DISABLED);


        /* Disable all receive return rings but the first. */
        if (tg3_flag(tp, 5717_PLUS))
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
        else if (!tg3_flag(tp, 5705_PLUS))
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
        else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
                 tg3_flag(tp, 57765_CLASS))
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
        else
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

        for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
             rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
                tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
                              BDINFO_FLAGS_DISABLED);

        /* Disable interrupts (writing 1 to the interrupt mailbox masks
         * the vector) and reset the MSI-stall bookkeeping for vector 0.
         */
        tw32_mailbox_f(tp->napi[0].int_mbox, 1);
        tp->napi[0].chk_msi_cnt = 0;
        tp->napi[0].last_rx_cons = 0;
        tp->napi[0].last_tx_cons = 0;

        /* Zero mailbox registers. */
        if (tg3_flag(tp, SUPPORT_MSIX)) {
                for (i = 1; i < tp->irq_max; i++) {
                        tp->napi[i].tx_prod = 0;
                        tp->napi[i].tx_cons = 0;
                        if (tg3_flag(tp, ENABLE_TSS))
                                tw32_mailbox(tp->napi[i].prodmbox, 0);
                        tw32_rx_mbox(tp->napi[i].consmbox, 0);
                        tw32_mailbox_f(tp->napi[i].int_mbox, 1);
                        tp->napi[i].chk_msi_cnt = 0;
                        tp->napi[i].last_rx_cons = 0;
                        tp->napi[i].last_tx_cons = 0;
                }
                /* Without TSS, vector 0 owns the single tx producer mbox. */
                if (!tg3_flag(tp, ENABLE_TSS))
                        tw32_mailbox(tp->napi[0].prodmbox, 0);
        } else {
                tp->napi[0].tx_prod = 0;
                tp->napi[0].tx_cons = 0;
                tw32_mailbox(tp->napi[0].prodmbox, 0);
                tw32_rx_mbox(tp->napi[0].consmbox, 0);
        }

        /* Make sure the NIC-based send BD rings are disabled. */
        if (!tg3_flag(tp, 5705_PLUS)) {
                u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
                for (i = 0; i < 16; i++)
                        tw32_tx_mbox(mbox + i * 8, 0);
        }

        txrcb = NIC_SRAM_SEND_RCB;
        rxrcb = NIC_SRAM_RCV_RET_RCB;

        /* Clear status block in ram. */
        memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

        /* Set status block DMA address */
        tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
             ((u64) tnapi->status_mapping >> 32));
        tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
             ((u64) tnapi->status_mapping & 0xffffffff));

        /* Re-enable vector 0's tx ring and rx return ring BDINFO blocks. */
        if (tnapi->tx_ring) {
                tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
                               (TG3_TX_RING_SIZE <<
                                BDINFO_FLAGS_MAXLEN_SHIFT),
                               NIC_SRAM_TX_BUFFER_DESC);
                txrcb += TG3_BDINFO_SIZE;
        }

        if (tnapi->rx_rcb) {
                tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
                               (tp->rx_ret_ring_mask + 1) <<
                                BDINFO_FLAGS_MAXLEN_SHIFT, 0);
                rxrcb += TG3_BDINFO_SIZE;
        }

        /* Remaining vectors: status block addresses are 8 bytes apart
         * starting at HOSTCC_STATBLCK_RING1.
         */
        stblk = HOSTCC_STATBLCK_RING1;

        for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
                u64 mapping = (u64)tnapi->status_mapping;
                tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
                tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);

                /* Clear status block in ram. */
                memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

                if (tnapi->tx_ring) {
                        tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
                                       (TG3_TX_RING_SIZE <<
                                        BDINFO_FLAGS_MAXLEN_SHIFT),
                                       NIC_SRAM_TX_BUFFER_DESC);
                        txrcb += TG3_BDINFO_SIZE;
                }

                tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
                               ((tp->rx_ret_ring_mask + 1) <<
                                BDINFO_FLAGS_MAXLEN_SHIFT), 0);

                stblk += 8;
                rxrcb += TG3_BDINFO_SIZE;
        }
}
8319
8320 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8321 {
8322         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8323
8324         if (!tg3_flag(tp, 5750_PLUS) ||
8325             tg3_flag(tp, 5780_CLASS) ||
8326             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8327             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8328             tg3_flag(tp, 57765_PLUS))
8329                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8330         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8331                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8332                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8333         else
8334                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8335
8336         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8337         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8338
8339         val = min(nic_rep_thresh, host_rep_thresh);
8340         tw32(RCVBDI_STD_THRESH, val);
8341
8342         if (tg3_flag(tp, 57765_PLUS))
8343                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8344
8345         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8346                 return;
8347
8348         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8349
8350         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8351
8352         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8353         tw32(RCVBDI_JUMBO_THRESH, val);
8354
8355         if (tg3_flag(tp, 57765_PLUS))
8356                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8357 }
8358
8359 static inline u32 calc_crc(unsigned char *buf, int len)
8360 {
8361         u32 reg;
8362         u32 tmp;
8363         int j, k;
8364
8365         reg = 0xffffffff;
8366
8367         for (j = 0; j < len; j++) {
8368                 reg ^= buf[j];
8369
8370                 for (k = 0; k < 8; k++) {
8371                         tmp = reg & 0x01;
8372
8373                         reg >>= 1;
8374
8375                         if (tmp)
8376                                 reg ^= 0xedb88320;
8377                 }
8378         }
8379
8380         return ~reg;
8381 }
8382
8383 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8384 {
8385         /* accept or reject all multicast frames */
8386         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8387         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8388         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8389         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8390 }
8391
/* Apply the netdev rx filtering flags (promiscuous / allmulti / mc list)
 * to the MAC: program the multicast hash registers and, if the computed
 * RX_MODE differs from the cached one, rewrite MAC_RX_MODE.
 * Caller holds tp->lock.
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        u32 rx_mode;

        rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
                                  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
        /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
         * flag clear.
         */
        if (!tg3_flag(tp, ENABLE_ASF))
                rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

        if (dev->flags & IFF_PROMISC) {
                /* Promiscuous mode. */
                rx_mode |= RX_MODE_PROMISC;
        } else if (dev->flags & IFF_ALLMULTI) {
                /* Accept all multicast. */
                tg3_set_multi(tp, 1);
        } else if (netdev_mc_empty(dev)) {
                /* Reject all multicast. */
                tg3_set_multi(tp, 0);
        } else {
                /* Accept one or more multicast(s). */
                struct netdev_hw_addr *ha;
                u32 mc_filter[4] = { 0, };
                u32 regidx;
                u32 bit;
                u32 crc;

                /* Hash each address into a 128-bit filter: the top 7
                 * inverted CRC bits select one of 4 registers (bits 6:5)
                 * and a bit position within it (bits 4:0).
                 */
                netdev_for_each_mc_addr(ha, dev) {
                        crc = calc_crc(ha->addr, ETH_ALEN);
                        bit = ~crc & 0x7f;
                        regidx = (bit & 0x60) >> 5;
                        bit &= 0x1f;
                        mc_filter[regidx] |= (1 << bit);
                }

                tw32(MAC_HASH_REG_0, mc_filter[0]);
                tw32(MAC_HASH_REG_1, mc_filter[1]);
                tw32(MAC_HASH_REG_2, mc_filter[2]);
                tw32(MAC_HASH_REG_3, mc_filter[3]);
        }

        /* Only touch the hardware register when the mode changed. */
        if (rx_mode != tp->rx_mode) {
                tp->rx_mode = rx_mode;
                tw32_f(MAC_RX_MODE, rx_mode);
                udelay(10);
        }
}
8445
8446 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
8447 {
8448         int i;
8449
8450         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8451                 tp->rss_ind_tbl[i] =
8452                         ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
8453 }
8454
8455 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8456 {
8457         int i;
8458
8459         if (!tg3_flag(tp, SUPPORT_MSIX))
8460                 return;
8461
8462         if (tp->irq_cnt <= 2) {
8463                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8464                 return;
8465         }
8466
8467         /* Validate table against current IRQ count */
8468         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8469                 if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
8470                         break;
8471         }
8472
8473         if (i != TG3_RSS_INDIR_TBL_SIZE)
8474                 tg3_rss_init_dflt_indir_tbl(tp);
8475 }
8476
8477 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8478 {
8479         int i = 0;
8480         u32 reg = MAC_RSS_INDIR_TBL_0;
8481
8482         while (i < TG3_RSS_INDIR_TBL_SIZE) {
8483                 u32 val = tp->rss_ind_tbl[i];
8484                 i++;
8485                 for (; i % 8; i++) {
8486                         val <<= 4;
8487                         val |= tp->rss_ind_tbl[i];
8488                 }
8489                 tw32(reg, val);
8490                 reg += 4;
8491         }
8492 }
8493
8494 /* tp->lock is held. */
8495 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8496 {
8497         u32 val, rdmac_mode;
8498         int i, err, limit;
8499         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8500
8501         tg3_disable_ints(tp);
8502
8503         tg3_stop_fw(tp);
8504
8505         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8506
8507         if (tg3_flag(tp, INIT_COMPLETE))
8508                 tg3_abort_hw(tp, 1);
8509
8510         /* Enable MAC control of LPI */
8511         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8512                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8513                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8514                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
8515
8516                 tw32_f(TG3_CPMU_EEE_CTRL,
8517                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8518
8519                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8520                       TG3_CPMU_EEEMD_LPI_IN_TX |
8521                       TG3_CPMU_EEEMD_LPI_IN_RX |
8522                       TG3_CPMU_EEEMD_EEE_ENABLE;
8523
8524                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8525                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8526
8527                 if (tg3_flag(tp, ENABLE_APE))
8528                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8529
8530                 tw32_f(TG3_CPMU_EEE_MODE, val);
8531
8532                 tw32_f(TG3_CPMU_EEE_DBTMR1,
8533                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8534                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8535
8536                 tw32_f(TG3_CPMU_EEE_DBTMR2,
8537                        TG3_CPMU_DBTMR2_APE_TX_2047US |
8538                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8539         }
8540
8541         if (reset_phy)
8542                 tg3_phy_reset(tp);
8543
8544         err = tg3_chip_reset(tp);
8545         if (err)
8546                 return err;
8547
8548         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8549
8550         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8551                 val = tr32(TG3_CPMU_CTRL);
8552                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8553                 tw32(TG3_CPMU_CTRL, val);
8554
8555                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8556                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8557                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8558                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8559
8560                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8561                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8562                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8563                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8564
8565                 val = tr32(TG3_CPMU_HST_ACC);
8566                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8567                 val |= CPMU_HST_ACC_MACCLK_6_25;
8568                 tw32(TG3_CPMU_HST_ACC, val);
8569         }
8570
8571         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8572                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8573                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8574                        PCIE_PWR_MGMT_L1_THRESH_4MS;
8575                 tw32(PCIE_PWR_MGMT_THRESH, val);
8576
8577                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8578                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8579
8580                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8581
8582                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8583                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8584         }
8585
8586         if (tg3_flag(tp, L1PLLPD_EN)) {
8587                 u32 grc_mode = tr32(GRC_MODE);
8588
8589                 /* Access the lower 1K of PL PCIE block registers. */
8590                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8591                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8592
8593                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8594                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8595                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8596
8597                 tw32(GRC_MODE, grc_mode);
8598         }
8599
8600         if (tg3_flag(tp, 57765_CLASS)) {
8601                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8602                         u32 grc_mode = tr32(GRC_MODE);
8603
8604                         /* Access the lower 1K of PL PCIE block registers. */
8605                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8606                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8607
8608                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8609                                    TG3_PCIE_PL_LO_PHYCTL5);
8610                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8611                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8612
8613                         tw32(GRC_MODE, grc_mode);
8614                 }
8615
8616                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8617                         u32 grc_mode = tr32(GRC_MODE);
8618
8619                         /* Access the lower 1K of DL PCIE block registers. */
8620                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8621                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8622
8623                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8624                                    TG3_PCIE_DL_LO_FTSMAX);
8625                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8626                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8627                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8628
8629                         tw32(GRC_MODE, grc_mode);
8630                 }
8631
8632                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8633                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8634                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8635                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8636         }
8637
8638         /* This works around an issue with Athlon chipsets on
8639          * B3 tigon3 silicon.  This bit has no effect on any
8640          * other revision.  But do not set this on PCI Express
8641          * chips and don't even touch the clocks if the CPMU is present.
8642          */
8643         if (!tg3_flag(tp, CPMU_PRESENT)) {
8644                 if (!tg3_flag(tp, PCI_EXPRESS))
8645                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8646                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8647         }
8648
8649         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8650             tg3_flag(tp, PCIX_MODE)) {
8651                 val = tr32(TG3PCI_PCISTATE);
8652                 val |= PCISTATE_RETRY_SAME_DMA;
8653                 tw32(TG3PCI_PCISTATE, val);
8654         }
8655
8656         if (tg3_flag(tp, ENABLE_APE)) {
8657                 /* Allow reads and writes to the
8658                  * APE register and memory space.
8659                  */
8660                 val = tr32(TG3PCI_PCISTATE);
8661                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8662                        PCISTATE_ALLOW_APE_SHMEM_WR |
8663                        PCISTATE_ALLOW_APE_PSPACE_WR;
8664                 tw32(TG3PCI_PCISTATE, val);
8665         }
8666
8667         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8668                 /* Enable some hw fixes.  */
8669                 val = tr32(TG3PCI_MSI_DATA);
8670                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8671                 tw32(TG3PCI_MSI_DATA, val);
8672         }
8673
8674         /* Descriptor ring init may make accesses to the
8675          * NIC SRAM area to setup the TX descriptors, so we
8676          * can only do this after the hardware has been
8677          * successfully reset.
8678          */
8679         err = tg3_init_rings(tp);
8680         if (err)
8681                 return err;
8682
8683         if (tg3_flag(tp, 57765_PLUS)) {
8684                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8685                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8686                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8687                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8688                 if (!tg3_flag(tp, 57765_CLASS) &&
8689                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8690                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8691                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8692         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8693                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8694                 /* This value is determined during the probe time DMA
8695                  * engine test, tg3_test_dma.
8696                  */
8697                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8698         }
8699
8700         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8701                           GRC_MODE_4X_NIC_SEND_RINGS |
8702                           GRC_MODE_NO_TX_PHDR_CSUM |
8703                           GRC_MODE_NO_RX_PHDR_CSUM);
8704         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8705
8706         /* Pseudo-header checksum is done by hardware logic and not
8707          * the offload processors, so make the chip do the pseudo-
8708          * header checksums on receive.  For transmit it is more
8709          * convenient to do the pseudo-header checksum in software
8710          * as Linux does that on transmit for us in all cases.
8711          */
8712         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8713
8714         tw32(GRC_MODE,
8715              tp->grc_mode |
8716              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8717
8718         /* Setup the timer prescaler register.  Clock is always 66MHz. */
8719         val = tr32(GRC_MISC_CFG);
8720         val &= ~0xff;
8721         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8722         tw32(GRC_MISC_CFG, val);
8723
8724         /* Initialize MBUF/DESC pool. */
8725         if (tg3_flag(tp, 5750_PLUS)) {
8726                 /* Do nothing.  */
8727         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8728                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8729                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8730                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8731                 else
8732                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8733                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8734                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8735         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8736                 int fw_len;
8737
8738                 fw_len = tp->fw_len;
8739                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8740                 tw32(BUFMGR_MB_POOL_ADDR,
8741                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8742                 tw32(BUFMGR_MB_POOL_SIZE,
8743                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8744         }
8745
8746         if (tp->dev->mtu <= ETH_DATA_LEN) {
8747                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8748                      tp->bufmgr_config.mbuf_read_dma_low_water);
8749                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8750                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8751                 tw32(BUFMGR_MB_HIGH_WATER,
8752                      tp->bufmgr_config.mbuf_high_water);
8753         } else {
8754                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8755                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8756                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8757                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8758                 tw32(BUFMGR_MB_HIGH_WATER,
8759                      tp->bufmgr_config.mbuf_high_water_jumbo);
8760         }
8761         tw32(BUFMGR_DMA_LOW_WATER,
8762              tp->bufmgr_config.dma_low_water);
8763         tw32(BUFMGR_DMA_HIGH_WATER,
8764              tp->bufmgr_config.dma_high_water);
8765
8766         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8767         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8768                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8769         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8770             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8771             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8772                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8773         tw32(BUFMGR_MODE, val);
8774         for (i = 0; i < 2000; i++) {
8775                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8776                         break;
8777                 udelay(10);
8778         }
8779         if (i >= 2000) {
8780                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8781                 return -ENODEV;
8782         }
8783
8784         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8785                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8786
8787         tg3_setup_rxbd_thresholds(tp);
8788
8789         /* Initialize TG3_BDINFO's at:
8790          *  RCVDBDI_STD_BD:     standard eth size rx ring
8791          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8792          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8793          *
8794          * like so:
8795          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8796          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8797          *                              ring attribute flags
8798          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
8799          *
8800          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8801          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8802          *
8803          * The size of each ring is fixed in the firmware, but the location is
8804          * configurable.
8805          */
8806         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8807              ((u64) tpr->rx_std_mapping >> 32));
8808         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8809              ((u64) tpr->rx_std_mapping & 0xffffffff));
8810         if (!tg3_flag(tp, 5717_PLUS))
8811                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8812                      NIC_SRAM_RX_BUFFER_DESC);
8813
8814         /* Disable the mini ring */
8815         if (!tg3_flag(tp, 5705_PLUS))
8816                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8817                      BDINFO_FLAGS_DISABLED);
8818
8819         /* Program the jumbo buffer descriptor ring control
8820          * blocks on those devices that have them.
8821          */
8822         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8823             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8824
8825                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8826                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8827                              ((u64) tpr->rx_jmb_mapping >> 32));
8828                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8829                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8830                         val = TG3_RX_JMB_RING_SIZE(tp) <<
8831                               BDINFO_FLAGS_MAXLEN_SHIFT;
8832                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8833                              val | BDINFO_FLAGS_USE_EXT_RECV);
8834                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8835                             tg3_flag(tp, 57765_CLASS))
8836                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8837                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8838                 } else {
8839                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8840                              BDINFO_FLAGS_DISABLED);
8841                 }
8842
8843                 if (tg3_flag(tp, 57765_PLUS)) {
8844                         val = TG3_RX_STD_RING_SIZE(tp);
8845                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8846                         val |= (TG3_RX_STD_DMA_SZ << 2);
8847                 } else
8848                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8849         } else
8850                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8851
8852         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8853
8854         tpr->rx_std_prod_idx = tp->rx_pending;
8855         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8856
8857         tpr->rx_jmb_prod_idx =
8858                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8859         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8860
8861         tg3_rings_reset(tp);
8862
8863         /* Initialize MAC address and backoff seed. */
8864         __tg3_set_mac_addr(tp, 0);
8865
8866         /* MTU + ethernet header + FCS + optional VLAN tag */
8867         tw32(MAC_RX_MTU_SIZE,
8868              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8869
8870         /* The slot time is changed by tg3_setup_phy if we
8871          * run at gigabit with half duplex.
8872          */
8873         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8874               (6 << TX_LENGTHS_IPG_SHIFT) |
8875               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8876
8877         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8878                 val |= tr32(MAC_TX_LENGTHS) &
8879                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
8880                         TX_LENGTHS_CNT_DWN_VAL_MSK);
8881
8882         tw32(MAC_TX_LENGTHS, val);
8883
8884         /* Receive rules. */
8885         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8886         tw32(RCVLPC_CONFIG, 0x0181);
8887
8888         /* Calculate RDMAC_MODE setting early, we need it to determine
8889          * the RCVLPC_STATE_ENABLE mask.
8890          */
8891         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8892                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8893                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8894                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8895                       RDMAC_MODE_LNGREAD_ENAB);
8896
8897         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8898                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8899
8900         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8901             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8902             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8903                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8904                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8905                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8906
8907         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8908             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8909                 if (tg3_flag(tp, TSO_CAPABLE) &&
8910                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8911                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8912                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8913                            !tg3_flag(tp, IS_5788)) {
8914                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8915                 }
8916         }
8917
8918         if (tg3_flag(tp, PCI_EXPRESS))
8919                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8920
8921         if (tg3_flag(tp, HW_TSO_1) ||
8922             tg3_flag(tp, HW_TSO_2) ||
8923             tg3_flag(tp, HW_TSO_3))
8924                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8925
8926         if (tg3_flag(tp, 57765_PLUS) ||
8927             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8928             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8929                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8930
8931         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8932                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8933
8934         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8935             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8936             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8937             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8938             tg3_flag(tp, 57765_PLUS)) {
8939                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8940                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8941                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8942                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8943                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8944                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8945                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8946                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8947                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8948                 }
8949                 tw32(TG3_RDMA_RSRVCTRL_REG,
8950                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8951         }
8952
8953         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8954             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8955                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8956                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8957                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8958                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8959         }
8960
8961         /* Receive/send statistics. */
8962         if (tg3_flag(tp, 5750_PLUS)) {
8963                 val = tr32(RCVLPC_STATS_ENABLE);
8964                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8965                 tw32(RCVLPC_STATS_ENABLE, val);
8966         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8967                    tg3_flag(tp, TSO_CAPABLE)) {
8968                 val = tr32(RCVLPC_STATS_ENABLE);
8969                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8970                 tw32(RCVLPC_STATS_ENABLE, val);
8971         } else {
8972                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8973         }
8974         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8975         tw32(SNDDATAI_STATSENAB, 0xffffff);
8976         tw32(SNDDATAI_STATSCTRL,
8977              (SNDDATAI_SCTRL_ENABLE |
8978               SNDDATAI_SCTRL_FASTUPD));
8979
8980         /* Setup host coalescing engine. */
8981         tw32(HOSTCC_MODE, 0);
8982         for (i = 0; i < 2000; i++) {
8983                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8984                         break;
8985                 udelay(10);
8986         }
8987
8988         __tg3_set_coalesce(tp, &tp->coal);
8989
8990         if (!tg3_flag(tp, 5705_PLUS)) {
8991                 /* Status/statistics block address.  See tg3_timer,
8992                  * the tg3_periodic_fetch_stats call there, and
8993                  * tg3_get_stats to see how this works for 5705/5750 chips.
8994                  */
8995                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8996                      ((u64) tp->stats_mapping >> 32));
8997                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8998                      ((u64) tp->stats_mapping & 0xffffffff));
8999                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9000
9001                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9002
9003                 /* Clear statistics and status block memory areas */
9004                 for (i = NIC_SRAM_STATS_BLK;
9005                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9006                      i += sizeof(u32)) {
9007                         tg3_write_mem(tp, i, 0);
9008                         udelay(40);
9009                 }
9010         }
9011
9012         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9013
9014         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9015         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9016         if (!tg3_flag(tp, 5705_PLUS))
9017                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9018
9019         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9020                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9021                 /* reset to prevent losing 1st rx packet intermittently */
9022                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9023                 udelay(10);
9024         }
9025
9026         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9027                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9028                         MAC_MODE_FHDE_ENABLE;
9029         if (tg3_flag(tp, ENABLE_APE))
9030                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9031         if (!tg3_flag(tp, 5705_PLUS) &&
9032             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9033             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9034                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9035         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9036         udelay(40);
9037
9038         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9039          * If TG3_FLAG_IS_NIC is zero, we should read the
9040          * register to preserve the GPIO settings for LOMs. The GPIOs,
9041          * whether used as inputs or outputs, are set by boot code after
9042          * reset.
9043          */
9044         if (!tg3_flag(tp, IS_NIC)) {
9045                 u32 gpio_mask;
9046
9047                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9048                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9049                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9050
9051                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9052                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9053                                      GRC_LCLCTRL_GPIO_OUTPUT3;
9054
9055                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9056                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9057
9058                 tp->grc_local_ctrl &= ~gpio_mask;
9059                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9060
9061                 /* GPIO1 must be driven high for eeprom write protect */
9062                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9063                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9064                                                GRC_LCLCTRL_GPIO_OUTPUT1);
9065         }
9066         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9067         udelay(100);
9068
9069         if (tg3_flag(tp, USING_MSIX)) {
9070                 val = tr32(MSGINT_MODE);
9071                 val |= MSGINT_MODE_ENABLE;
9072                 if (tp->irq_cnt > 1)
9073                         val |= MSGINT_MODE_MULTIVEC_EN;
9074                 if (!tg3_flag(tp, 1SHOT_MSI))
9075                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9076                 tw32(MSGINT_MODE, val);
9077         }
9078
9079         if (!tg3_flag(tp, 5705_PLUS)) {
9080                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9081                 udelay(40);
9082         }
9083
9084         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9085                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9086                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9087                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9088                WDMAC_MODE_LNGREAD_ENAB);
9089
9090         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9091             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9092                 if (tg3_flag(tp, TSO_CAPABLE) &&
9093                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9094                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9095                         /* nothing */
9096                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9097                            !tg3_flag(tp, IS_5788)) {
9098                         val |= WDMAC_MODE_RX_ACCEL;
9099                 }
9100         }
9101
9102         /* Enable host coalescing bug fix */
9103         if (tg3_flag(tp, 5755_PLUS))
9104                 val |= WDMAC_MODE_STATUS_TAG_FIX;
9105
9106         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9107                 val |= WDMAC_MODE_BURST_ALL_DATA;
9108
9109         tw32_f(WDMAC_MODE, val);
9110         udelay(40);
9111
9112         if (tg3_flag(tp, PCIX_MODE)) {
9113                 u16 pcix_cmd;
9114
9115                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9116                                      &pcix_cmd);
9117                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9118                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9119                         pcix_cmd |= PCI_X_CMD_READ_2K;
9120                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9121                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9122                         pcix_cmd |= PCI_X_CMD_READ_2K;
9123                 }
9124                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9125                                       pcix_cmd);
9126         }
9127
9128         tw32_f(RDMAC_MODE, rdmac_mode);
9129         udelay(40);
9130
9131         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9132         if (!tg3_flag(tp, 5705_PLUS))
9133                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9134
9135         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9136                 tw32(SNDDATAC_MODE,
9137                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9138         else
9139                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9140
9141         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9142         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9143         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9144         if (tg3_flag(tp, LRG_PROD_RING_CAP))
9145                 val |= RCVDBDI_MODE_LRG_RING_SZ;
9146         tw32(RCVDBDI_MODE, val);
9147         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9148         if (tg3_flag(tp, HW_TSO_1) ||
9149             tg3_flag(tp, HW_TSO_2) ||
9150             tg3_flag(tp, HW_TSO_3))
9151                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9152         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9153         if (tg3_flag(tp, ENABLE_TSS))
9154                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9155         tw32(SNDBDI_MODE, val);
9156         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9157
9158         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9159                 err = tg3_load_5701_a0_firmware_fix(tp);
9160                 if (err)
9161                         return err;
9162         }
9163
9164         if (tg3_flag(tp, TSO_CAPABLE)) {
9165                 err = tg3_load_tso_firmware(tp);
9166                 if (err)
9167                         return err;
9168         }
9169
9170         tp->tx_mode = TX_MODE_ENABLE;
9171
9172         if (tg3_flag(tp, 5755_PLUS) ||
9173             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9174                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9175
9176         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9177                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9178                 tp->tx_mode &= ~val;
9179                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9180         }
9181
9182         tw32_f(MAC_TX_MODE, tp->tx_mode);
9183         udelay(100);
9184
9185         if (tg3_flag(tp, ENABLE_RSS)) {
9186                 tg3_rss_write_indir_tbl(tp);
9187
9188                 /* Setup the "secret" hash key. */
9189                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9190                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9191                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9192                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9193                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9194                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9195                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9196                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9197                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9198                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9199         }
9200
9201         tp->rx_mode = RX_MODE_ENABLE;
9202         if (tg3_flag(tp, 5755_PLUS))
9203                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9204
9205         if (tg3_flag(tp, ENABLE_RSS))
9206                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9207                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
9208                                RX_MODE_RSS_IPV6_HASH_EN |
9209                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
9210                                RX_MODE_RSS_IPV4_HASH_EN |
9211                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
9212
9213         tw32_f(MAC_RX_MODE, tp->rx_mode);
9214         udelay(10);
9215
9216         tw32(MAC_LED_CTRL, tp->led_ctrl);
9217
9218         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9219         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9220                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9221                 udelay(10);
9222         }
9223         tw32_f(MAC_RX_MODE, tp->rx_mode);
9224         udelay(10);
9225
9226         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9227                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9228                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9229                         /* Set drive transmission level to 1.2V  */
9230                         /* only if the signal pre-emphasis bit is not set  */
9231                         val = tr32(MAC_SERDES_CFG);
9232                         val &= 0xfffff000;
9233                         val |= 0x880;
9234                         tw32(MAC_SERDES_CFG, val);
9235                 }
9236                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9237                         tw32(MAC_SERDES_CFG, 0x616000);
9238         }
9239
9240         /* Prevent chip from dropping frames when flow control
9241          * is enabled.
9242          */
9243         if (tg3_flag(tp, 57765_CLASS))
9244                 val = 1;
9245         else
9246                 val = 2;
9247         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9248
9249         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9250             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9251                 /* Use hardware link auto-negotiation */
9252                 tg3_flag_set(tp, HW_AUTONEG);
9253         }
9254
9255         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9256             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9257                 u32 tmp;
9258
9259                 tmp = tr32(SERDES_RX_CTRL);
9260                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9261                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9262                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9263                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9264         }
9265
9266         if (!tg3_flag(tp, USE_PHYLIB)) {
9267                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
9268                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9269                         tp->link_config.speed = tp->link_config.orig_speed;
9270                         tp->link_config.duplex = tp->link_config.orig_duplex;
9271                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
9272                 }
9273
9274                 err = tg3_setup_phy(tp, 0);
9275                 if (err)
9276                         return err;
9277
9278                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9279                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9280                         u32 tmp;
9281
9282                         /* Clear CRC stats. */
9283                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9284                                 tg3_writephy(tp, MII_TG3_TEST1,
9285                                              tmp | MII_TG3_TEST1_CRC_EN);
9286                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9287                         }
9288                 }
9289         }
9290
9291         __tg3_set_rx_mode(tp->dev);
9292
9293         /* Initialize receive rules. */
9294         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9295         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9296         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9297         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9298
9299         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9300                 limit = 8;
9301         else
9302                 limit = 16;
9303         if (tg3_flag(tp, ENABLE_ASF))
9304                 limit -= 4;
9305         switch (limit) {
9306         case 16:
9307                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9308         case 15:
9309                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9310         case 14:
9311                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9312         case 13:
9313                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9314         case 12:
9315                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9316         case 11:
9317                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9318         case 10:
9319                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9320         case 9:
9321                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9322         case 8:
9323                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9324         case 7:
9325                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9326         case 6:
9327                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9328         case 5:
9329                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9330         case 4:
9331                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9332         case 3:
9333                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9334         case 2:
9335         case 1:
9336
9337         default:
9338                 break;
9339         }
9340
9341         if (tg3_flag(tp, ENABLE_APE))
9342                 /* Write our heartbeat update interval to APE. */
9343                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9344                                 APE_HOST_HEARTBEAT_INT_DISABLE);
9345
9346         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9347
9348         return 0;
9349 }
9350
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 *
 * Returns 0 on success or a negative errno from tg3_reset_hw().
 */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
	/* Make sure the core clock source is stable before touching
	 * the rest of the chip.
	 */
	tg3_switch_clocks(tp);

	/* Reset the indirect memory window to the base of NIC SRAM;
	 * later SRAM accesses assume this starting offset.
	 */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
9362
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * On failure the device is halted and administratively closed; the
 * error from tg3_init_hw() is returned to the caller.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		/* dev_close() re-enters the driver's close path, which
		 * takes the full lock itself, so it must be dropped here
		 * (hence the __releases/__acquires annotations above).
		 */
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		/* Re-enable NAPI first so the close path can disable it
		 * again without tripping over an already-disabled state.
		 */
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
9386
/* Workqueue handler for tp->reset_task: perform a full chip halt and
 * re-initialization in process context.  Scheduled (via
 * tg3_reset_task_schedule()) when the hardware appears wedged, e.g.
 * a tx timeout or the watchdog DMA engine stopping in tg3_timer().
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	tg3_full_lock(tp, 0);

	/* The device may have been closed between scheduling and
	 * execution; in that case there is nothing to reset.
	 */
	if (!netif_running(tp->dev)) {
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	/* These may sleep, so they must run without the full lock held. */
	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		/* NOTE(review): switching to the flushed mailbox write
		 * routines and setting MBOX_WRITE_REORDER suggests the tx
		 * hang is attributed to reordered posted writes — confirm
		 * against the tx timeout path that sets this flag.
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	/* Restart the PHY only if re-init succeeded. */
	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
9430
/* Accumulate the 32-bit hardware counter at REG into the 64-bit
 * software counter PSTAT.  The unsigned addition's carry is detected
 * by the result being smaller than the addend, in which case the
 * high word is bumped.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
9437
/* Fold the chip's 32-bit MAC statistics registers into the 64-bit
 * counters in tp->hw_stats.  Called once per second from tg3_timer()
 * with tp->lock held.  Skipped while the carrier is down, when the
 * counters cannot be advancing.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!netif_carrier_ok(tp->dev))
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		/* NOTE(review): the chip-rev exclusion suggests the
		 * RCVLPC discards counter is not usable on 5717 and early
		 * 5719/5720 silicon; rx_discards is approximated by
		 * counting mbuf low-watermark attention events, and
		 * writing the bit back presumably acknowledges the
		 * attention — confirm against the register spec.
		 */
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			/* Open-coded TG3_STAT_ADD32 carry handling, since
			 * the value comes from the attention bit rather
			 * than a stats register read.
			 */
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
9492
/* Work around lost MSIs: for each vector, if work is pending but
 * neither the rx nor tx consumer index has moved since the previous
 * timer tick, assume the interrupt was dropped and invoke the MSI
 * handler directly.  One tick of grace (chk_msi_cnt) avoids firing on
 * work that arrived just before the timer ran.
 */
static void tg3_chk_missed_msi(struct tg3 *tp)
{
	u32 i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tg3_has_work(tnapi)) {
			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
			    tnapi->last_tx_cons == tnapi->tx_cons) {
				/* No progress since last tick: wait one
				 * more tick before forcing the handler.
				 */
				if (tnapi->chk_msi_cnt < 1) {
					tnapi->chk_msi_cnt++;
					return;
				}
				tg3_msi(0, tnapi);
			}
		}
		/* Progress was made (or no work): reset the watchdog and
		 * snapshot the consumer indices for the next tick.
		 */
		tnapi->chk_msi_cnt = 0;
		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
		tnapi->last_tx_cons = tnapi->tx_cons;
	}
}
9515
/* Periodic driver timer (period tp->timer_offset jiffies).  Handles
 * missed-MSI detection, the non-tagged-status interrupt race
 * workaround, once-per-second link polling and stats collection, and
 * the two-second ASF firmware heartbeat.  Re-arms itself on exit.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* Skip the body while an irq sync or reset task is in flight,
	 * but keep the timer running.
	 */
	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
		goto restart_timer;

	spin_lock(&tp->lock);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			/* Status block updated but possibly no IRQ seen:
			 * force an interrupt via the GRC local control.
			 */
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			/* Kick the coalescing engine to refresh status. */
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		/* Write DMA engine stopped unexpectedly: schedule a full
		 * chip reset from process context.
		 */
		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		/* Deferred EEE (LPI) enable countdown. */
		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			/* Poll the MAC status register for PHY/link
			 * change events instead of relying on a link
			 * change interrupt.
			 */
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			/* Link was up and the state changed, or link was
			 * down and the SERDES now shows sync/signal:
			 * either way the link needs renegotiating.
			 */
			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					/* Bounce the port mode off and
					 * back on before reconfiguring.
					 */
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
9642
9643 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9644 {
9645         irq_handler_t fn;
9646         unsigned long flags;
9647         char *name;
9648         struct tg3_napi *tnapi = &tp->napi[irq_num];
9649
9650         if (tp->irq_cnt == 1)
9651                 name = tp->dev->name;
9652         else {
9653                 name = &tnapi->irq_lbl[0];
9654                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9655                 name[IFNAMSIZ-1] = 0;
9656         }
9657
9658         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9659                 fn = tg3_msi;
9660                 if (tg3_flag(tp, 1SHOT_MSI))
9661                         fn = tg3_msi_1shot;
9662                 flags = 0;
9663         } else {
9664                 fn = tg3_interrupt;
9665                 if (tg3_flag(tp, TAGGED_STATUS))
9666                         fn = tg3_interrupt_tagged;
9667                 flags = IRQF_SHARED;
9668         }
9669
9670         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9671 }
9672
/* Verify that the chip can actually deliver an interrupt to the host
 * (used to validate MSI delivery).  Temporarily swaps in a test ISR,
 * forces a coalescing-engine interrupt, and polls for evidence of
 * delivery.  Restores the normal handler before returning.
 *
 * Returns 0 if an interrupt was observed, -EIO if not, -ENODEV if the
 * device is down, or an error from request_irq().
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Force the coalescing engine to fire an interrupt now. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	/* Poll up to ~50ms for evidence the interrupt arrived: either
	 * the interrupt mailbox changed or PCI interrupts were masked
	 * by the ISR.
	 */
	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		/* Ack the status tag so a pending update can re-fire. */
		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	/* Swap the normal handler back in. */
	free_irq(tnapi->irq_vec, tnapi);

	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
9746
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored.  Any other failure is returned as an errno.
 * No-op (returns 0) when MSI is not in use.
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the original PCI command word (SERR included). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	/* If re-init failed, release the INTx vector we just took. */
	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
9807
9808 static int tg3_request_firmware(struct tg3 *tp)
9809 {
9810         const __be32 *fw_data;
9811
9812         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9813                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9814                            tp->fw_needed);
9815                 return -ENOENT;
9816         }
9817
9818         fw_data = (void *)tp->fw->data;
9819
9820         /* Firmware blob starts with version numbers, followed by
9821          * start address and _full_ length including BSS sections
9822          * (which must be longer than the actual data, of course
9823          */
9824
9825         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
9826         if (tp->fw_len < (tp->fw->size - 12)) {
9827                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9828                            tp->fw_len, tp->fw_needed);
9829                 release_firmware(tp->fw);
9830                 tp->fw = NULL;
9831                 return -EINVAL;
9832         }
9833
9834         /* We no longer need firmware; we have it. */
9835         tp->fw_needed = NULL;
9836         return 0;
9837 }
9838
/* Try to put the device into MSI-X mode with up to one vector per
 * online CPU (plus one, since vector 0 handles link/misc events in
 * multiqueue mode).  Accepts a reduced vector count if the PCI layer
 * grants fewer than requested.  On success the napi irq_vec fields
 * and real rx/tx queue counts are configured and RSS/TSS flags set.
 *
 * Returns true if MSI-X was enabled, false to fall back to MSI/INTx.
 */
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc;
	struct msix_entry msix_ent[tp->irq_max];

	tp->irq_cnt = num_online_cpus();
	if (tp->irq_cnt > 1) {
		/* We want as many rx rings enabled as there are cpus.
		 * In multiqueue MSI-X mode, the first MSI-X vector
		 * only deals with link interrupts, etc, so we add
		 * one to the number of vectors we are requesting.
		 */
		tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
	}

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		/* Fewer vectors available than requested: retry with
		 * exactly the number the PCI layer said it can grant.
		 */
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	netif_set_real_num_tx_queues(tp->dev, 1);
	/* rx queues = vectors minus the link/misc vector (min 1). */
	rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
	if (netif_set_real_num_rx_queues(tp->dev, rc)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	if (tp->irq_cnt > 1) {
		tg3_flag_set(tp, ENABLE_RSS);

		/* 5719/5720 also support multiple tx queues (TSS). */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
			tg3_flag_set(tp, ENABLE_TSS);
			netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
		}
	}

	return true;
}
9892
9893 static void tg3_ints_init(struct tg3 *tp)
9894 {
9895         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9896             !tg3_flag(tp, TAGGED_STATUS)) {
9897                 /* All MSI supporting chips should support tagged
9898                  * status.  Assert that this is the case.
9899                  */
9900                 netdev_warn(tp->dev,
9901                             "MSI without TAGGED_STATUS? Not using MSI\n");
9902                 goto defcfg;
9903         }
9904
9905         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9906                 tg3_flag_set(tp, USING_MSIX);
9907         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9908                 tg3_flag_set(tp, USING_MSI);
9909
9910         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9911                 u32 msi_mode = tr32(MSGINT_MODE);
9912                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9913                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9914                 if (!tg3_flag(tp, 1SHOT_MSI))
9915                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
9916                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9917         }
9918 defcfg:
9919         if (!tg3_flag(tp, USING_MSIX)) {
9920                 tp->irq_cnt = 1;
9921                 tp->napi[0].irq_vec = tp->pdev->irq;
9922                 netif_set_real_num_tx_queues(tp->dev, 1);
9923                 netif_set_real_num_rx_queues(tp->dev, 1);
9924         }
9925 }
9926
9927 static void tg3_ints_fini(struct tg3 *tp)
9928 {
9929         if (tg3_flag(tp, USING_MSIX))
9930                 pci_disable_msix(tp->pdev);
9931         else if (tg3_flag(tp, USING_MSI))
9932                 pci_disable_msi(tp->pdev);
9933         tg3_flag_clear(tp, USING_MSI);
9934         tg3_flag_clear(tp, USING_MSIX);
9935         tg3_flag_clear(tp, ENABLE_RSS);
9936         tg3_flag_clear(tp, ENABLE_TSS);
9937 }
9938
/* net_device open handler: power up the chip, set up interrupts and
 * NAPI, allocate descriptor memory, initialize the hardware, start the
 * periodic timer and enable the tx queues.  Each error label unwinds
 * exactly the state established before the failure point.
 *
 * Returns 0 on success or a negative errno.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, err;

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
			/* 5701 A0 cannot operate without its firmware. */
			if (err)
				return err;
		} else if (err) {
			/* Other chips can still run without TSO firmware. */
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	netif_carrier_off(tp->dev);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto err_out1;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			/* Release the vectors acquired so far. */
			for (i--; i >= 0; i--) {
				tnapi = &tp->napi[i];
				free_irq(tnapi->irq_vec, tnapi);
			}
			goto err_out2;
		}
	}

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		/* Tagged status allows a slower 1s timer; otherwise the
		 * interrupt race workaround in tg3_timer() needs 10 Hz.
		 */
		if (tg3_flag(tp, TAGGED_STATUS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
		    !tg3_flag(tp, 57765_CLASS))
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		/* timer_multiplier converts ticks to 1s intervals; the
		 * ASF heartbeat runs at half that rate (every 2s).
		 */
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 2);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err)
		goto err_out3;

	if (tg3_flag(tp, USING_MSI)) {
		/* Confirm MSI delivery actually works on this platform;
		 * tg3_test_msi() falls back to INTx itself if possible.
		 */
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto err_out2;
		}

		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_full_lock(tp, 0);

	add_timer(&tp->timer);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

err_out3:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

err_out2:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

err_out1:
	tg3_ints_fini(tp);
	tg3_frob_aux_power(tp, false);
	pci_set_power_state(tp->pdev, PCI_D3hot);
	return err;
}
10091
/* net_device stop handler: quiesce NAPI and any pending reset work,
 * stop the queues and timer, halt the chip, release interrupts and
 * descriptor memory, then power the device down.  Mirrors tg3_open()
 * in reverse order.  Always returns 0.
 */
static int tg3_close(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	/* Disable NAPI and cancel the reset worker first so nothing
	 * re-enters the driver while we tear the device down.
	 */
	tg3_napi_disable(tp);
	tg3_reset_task_cancel(tp);

	netif_tx_stop_all_queues(dev);

	del_timer_sync(&tp->timer);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	/* Clear stats across close / open calls */
	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);

	tg3_power_down(tp);

	netif_carrier_off(tp->dev);

	return 0;
}
10137
10138 static inline u64 get_stat64(tg3_stat64_t *val)
10139 {
10140        return ((u64)val->high << 32) | ((u64)val->low);
10141 }
10142
/* Return the cumulative rx CRC error count.  On 5700/5701 with a
 * copper PHY the MAC's FCS counter is not used; instead the PHY's own
 * receive-error counter is read (and CRC counting re-armed) under
 * tp->lock, accumulating into tp->phy_crc_errors.  All other devices
 * report the MAC hardware counter directly.
 */
static u64 calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			/* Re-enable CRC error counting, then harvest the
			 * counter register.
			 */
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
10168
/* Compute one cumulative ethtool statistic: the value carried over
 * from before the last reset (old_estats) plus what the hardware has
 * accumulated since (hw_stats).  Expects estats, old_estats and
 * hw_stats in scope at the expansion site.
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)
10172
10173 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp,
10174                                                struct tg3_ethtool_stats *estats)
10175 {
10176         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10177         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10178
10179         ESTAT_ADD(rx_octets);
10180         ESTAT_ADD(rx_fragments);
10181         ESTAT_ADD(rx_ucast_packets);
10182         ESTAT_ADD(rx_mcast_packets);
10183         ESTAT_ADD(rx_bcast_packets);
10184         ESTAT_ADD(rx_fcs_errors);
10185         ESTAT_ADD(rx_align_errors);
10186         ESTAT_ADD(rx_xon_pause_rcvd);
10187         ESTAT_ADD(rx_xoff_pause_rcvd);
10188         ESTAT_ADD(rx_mac_ctrl_rcvd);
10189         ESTAT_ADD(rx_xoff_entered);
10190         ESTAT_ADD(rx_frame_too_long_errors);
10191         ESTAT_ADD(rx_jabbers);
10192         ESTAT_ADD(rx_undersize_packets);
10193         ESTAT_ADD(rx_in_length_errors);
10194         ESTAT_ADD(rx_out_length_errors);
10195         ESTAT_ADD(rx_64_or_less_octet_packets);
10196         ESTAT_ADD(rx_65_to_127_octet_packets);
10197         ESTAT_ADD(rx_128_to_255_octet_packets);
10198         ESTAT_ADD(rx_256_to_511_octet_packets);
10199         ESTAT_ADD(rx_512_to_1023_octet_packets);
10200         ESTAT_ADD(rx_1024_to_1522_octet_packets);
10201         ESTAT_ADD(rx_1523_to_2047_octet_packets);
10202         ESTAT_ADD(rx_2048_to_4095_octet_packets);
10203         ESTAT_ADD(rx_4096_to_8191_octet_packets);
10204         ESTAT_ADD(rx_8192_to_9022_octet_packets);
10205
10206         ESTAT_ADD(tx_octets);
10207         ESTAT_ADD(tx_collisions);
10208         ESTAT_ADD(tx_xon_sent);
10209         ESTAT_ADD(tx_xoff_sent);
10210         ESTAT_ADD(tx_flow_control);
10211         ESTAT_ADD(tx_mac_errors);
10212         ESTAT_ADD(tx_single_collisions);
10213         ESTAT_ADD(tx_mult_collisions);
10214         ESTAT_ADD(tx_deferred);
10215         ESTAT_ADD(tx_excessive_collisions);
10216         ESTAT_ADD(tx_late_collisions);
10217         ESTAT_ADD(tx_collide_2times);
10218         ESTAT_ADD(tx_collide_3times);
10219         ESTAT_ADD(tx_collide_4times);
10220         ESTAT_ADD(tx_collide_5times);
10221         ESTAT_ADD(tx_collide_6times);
10222         ESTAT_ADD(tx_collide_7times);
10223         ESTAT_ADD(tx_collide_8times);
10224         ESTAT_ADD(tx_collide_9times);
10225         ESTAT_ADD(tx_collide_10times);
10226         ESTAT_ADD(tx_collide_11times);
10227         ESTAT_ADD(tx_collide_12times);
10228         ESTAT_ADD(tx_collide_13times);
10229         ESTAT_ADD(tx_collide_14times);
10230         ESTAT_ADD(tx_collide_15times);
10231         ESTAT_ADD(tx_ucast_packets);
10232         ESTAT_ADD(tx_mcast_packets);
10233         ESTAT_ADD(tx_bcast_packets);
10234         ESTAT_ADD(tx_carrier_sense_errors);
10235         ESTAT_ADD(tx_discards);
10236         ESTAT_ADD(tx_errors);
10237
10238         ESTAT_ADD(dma_writeq_full);
10239         ESTAT_ADD(dma_write_prioq_full);
10240         ESTAT_ADD(rxbds_empty);
10241         ESTAT_ADD(rx_discards);
10242         ESTAT_ADD(rx_errors);
10243         ESTAT_ADD(rx_threshold_hit);
10244
10245         ESTAT_ADD(dma_readq_full);
10246         ESTAT_ADD(dma_read_prioq_full);
10247         ESTAT_ADD(tx_comp_queue_full);
10248
10249         ESTAT_ADD(ring_set_send_prod_index);
10250         ESTAT_ADD(ring_status_update);
10251         ESTAT_ADD(nic_irqs);
10252         ESTAT_ADD(nic_avoided_irqs);
10253         ESTAT_ADD(nic_tx_threshold_hit);
10254
10255         ESTAT_ADD(mbuf_lwm_thresh_hit);
10256
10257         return estats;
10258 }
10259
/* ndo_get_stats64 hook: fill @stats with cumulative interface counters
 * and return it.
 *
 * Each counter is the snapshot saved before the last chip reset
 * (tp->net_stats_prev) plus the live hardware statistics block, so the
 * totals survive resets.  If the stats block is not yet mapped (e.g.
 * before the first open), the saved snapshot is returned unchanged.
 */
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
                                                 struct rtnl_link_stats64 *stats)
{
        struct tg3 *tp = netdev_priv(dev);
        struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        if (!hw_stats)
                return old_stats;

        /* The hardware splits packet counts by cast type; sum them. */
        stats->rx_packets = old_stats->rx_packets +
                get_stat64(&hw_stats->rx_ucast_packets) +
                get_stat64(&hw_stats->rx_mcast_packets) +
                get_stat64(&hw_stats->rx_bcast_packets);

        stats->tx_packets = old_stats->tx_packets +
                get_stat64(&hw_stats->tx_ucast_packets) +
                get_stat64(&hw_stats->tx_mcast_packets) +
                get_stat64(&hw_stats->tx_bcast_packets);

        stats->rx_bytes = old_stats->rx_bytes +
                get_stat64(&hw_stats->rx_octets);
        stats->tx_bytes = old_stats->tx_bytes +
                get_stat64(&hw_stats->tx_octets);

        stats->rx_errors = old_stats->rx_errors +
                get_stat64(&hw_stats->rx_errors);
        stats->tx_errors = old_stats->tx_errors +
                get_stat64(&hw_stats->tx_errors) +
                get_stat64(&hw_stats->tx_mac_errors) +
                get_stat64(&hw_stats->tx_carrier_sense_errors) +
                get_stat64(&hw_stats->tx_discards);

        stats->multicast = old_stats->multicast +
                get_stat64(&hw_stats->rx_mcast_packets);
        stats->collisions = old_stats->collisions +
                get_stat64(&hw_stats->tx_collisions);

        stats->rx_length_errors = old_stats->rx_length_errors +
                get_stat64(&hw_stats->rx_frame_too_long_errors) +
                get_stat64(&hw_stats->rx_undersize_packets);

        stats->rx_over_errors = old_stats->rx_over_errors +
                get_stat64(&hw_stats->rxbds_empty);
        stats->rx_frame_errors = old_stats->rx_frame_errors +
                get_stat64(&hw_stats->rx_align_errors);
        stats->tx_aborted_errors = old_stats->tx_aborted_errors +
                get_stat64(&hw_stats->tx_discards);
        stats->tx_carrier_errors = old_stats->tx_carrier_errors +
                get_stat64(&hw_stats->tx_carrier_sense_errors);

        /* CRC errors may come from the PHY on some chips; see helper. */
        stats->rx_crc_errors = old_stats->rx_crc_errors +
                calc_crc_errors(tp);

        stats->rx_missed_errors = old_stats->rx_missed_errors +
                get_stat64(&hw_stats->rx_discards);

        /* Software drop counters maintained by the driver itself. */
        stats->rx_dropped = tp->rx_dropped;
        stats->tx_dropped = tp->tx_dropped;

        return stats;
}
10322
10323 static int tg3_get_regs_len(struct net_device *dev)
10324 {
10325         return TG3_REG_BLK_SIZE;
10326 }
10327
10328 static void tg3_get_regs(struct net_device *dev,
10329                 struct ethtool_regs *regs, void *_p)
10330 {
10331         struct tg3 *tp = netdev_priv(dev);
10332
10333         regs->version = 0;
10334
10335         memset(_p, 0, TG3_REG_BLK_SIZE);
10336
10337         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10338                 return;
10339
10340         tg3_full_lock(tp, 0);
10341
10342         tg3_dump_legacy_regs(tp, (u32 *)_p);
10343
10344         tg3_full_unlock(tp);
10345 }
10346
10347 static int tg3_get_eeprom_len(struct net_device *dev)
10348 {
10349         struct tg3 *tp = netdev_priv(dev);
10350
10351         return tp->nvram_size;
10352 }
10353
/* ethtool get_eeprom hook: copy @eeprom->len bytes of NVRAM starting at
 * @eeprom->offset into @data.
 *
 * NVRAM is read in 4-byte big-endian words, so the transfer is done in
 * three phases: an unaligned head (partial word), the aligned middle,
 * and an unaligned tail.  eeprom->len is updated incrementally so a
 * partial count is reported if a read fails midway.
 *
 * Returns 0 on success or a negative errno from the NVRAM layer.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
        struct tg3 *tp = netdev_priv(dev);
        int ret;
        u8  *pd;
        u32 i, offset, len, b_offset, b_count;
        __be32 val;

        if (tg3_flag(tp, NO_NVRAM))
                return -EINVAL;

        /* NVRAM is inaccessible while the chip is powered down. */
        if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
                return -EAGAIN;

        offset = eeprom->offset;
        len = eeprom->len;
        eeprom->len = 0;

        eeprom->magic = TG3_EEPROM_MAGIC;

        if (offset & 3) {
                /* adjustments to start on required 4 byte boundary */
                b_offset = offset & 3;
                b_count = 4 - b_offset;
                if (b_count > len) {
                        /* i.e. offset=1 len=2 */
                        b_count = len;
                }
                ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
                if (ret)
                        return ret;
                /* Copy only the requested bytes out of the aligned word. */
                memcpy(data, ((char *)&val) + b_offset, b_count);
                len -= b_count;
                offset += b_count;
                eeprom->len += b_count;
        }

        /* read bytes up to the last 4 byte boundary */
        pd = &data[eeprom->len];
        for (i = 0; i < (len - (len & 3)); i += 4) {
                ret = tg3_nvram_read_be32(tp, offset + i, &val);
                if (ret) {
                        /* Report how much was actually transferred. */
                        eeprom->len += i;
                        return ret;
                }
                memcpy(pd + i, &val, 4);
        }
        eeprom->len += i;

        if (len & 3) {
                /* read last bytes not ending on 4 byte boundary */
                pd = &data[eeprom->len];
                b_count = len & 3;
                b_offset = offset + len - b_count;
                ret = tg3_nvram_read_be32(tp, b_offset, &val);
                if (ret)
                        return ret;
                memcpy(pd, &val, b_count);
                eeprom->len += b_count;
        }
        return 0;
}
10416
/* ethtool set_eeprom hook: write @eeprom->len bytes from @data to NVRAM
 * at @eeprom->offset.
 *
 * NVRAM writes must be 4-byte aligned, so if the requested range is
 * unaligned at either end, the bordering words are first read back
 * (@start / @end) and merged with the caller's data in a temporary
 * bounce buffer before the whole aligned span is written.
 *
 * Returns 0 on success or a negative errno.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
        struct tg3 *tp = netdev_priv(dev);
        int ret;
        u32 offset, len, b_offset, odd_len;
        u8 *buf;
        __be32 start, end;

        /* NVRAM is inaccessible while the chip is powered down. */
        if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
                return -EAGAIN;

        /* Magic must match to guard against writes meant for another NIC. */
        if (tg3_flag(tp, NO_NVRAM) ||
            eeprom->magic != TG3_EEPROM_MAGIC)
                return -EINVAL;

        offset = eeprom->offset;
        len = eeprom->len;

        if ((b_offset = (offset & 3))) {
                /* adjustments to start on required 4 byte boundary */
                ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
                if (ret)
                        return ret;
                len += b_offset;
                offset &= ~3;
                if (len < 4)
                        len = 4;
        }

        odd_len = 0;
        if (len & 3) {
                /* adjustments to end on required 4 byte boundary */
                odd_len = 1;
                len = (len + 3) & ~3;
                ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
                if (ret)
                        return ret;
        }

        buf = data;
        if (b_offset || odd_len) {
                /* Merge the preserved border words with the new data. */
                buf = kmalloc(len, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;
                if (b_offset)
                        memcpy(buf, &start, 4);
                if (odd_len)
                        memcpy(buf+len-4, &end, 4);
                memcpy(buf + b_offset, data, eeprom->len);
        }

        ret = tg3_nvram_write_block(tp, offset, len, buf);

        if (buf != data)
                kfree(buf);

        return ret;
}
10475
/* ethtool get_settings hook: report link capabilities, advertisement
 * and current link state.
 *
 * When phylib manages the PHY, the query is delegated wholesale to
 * phy_ethtool_gset().  Otherwise the answer is assembled from the
 * driver's own link_config and phy_flags state.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct tg3 *tp = netdev_priv(dev);

        if (tg3_flag(tp, USE_PHYLIB)) {
                struct phy_device *phydev;
                if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                        return -EAGAIN;
                phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
                return phy_ethtool_gset(phydev, cmd);
        }

        cmd->supported = (SUPPORTED_Autoneg);

        /* Gigabit modes unless the PHY is limited to 10/100. */
        if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
                cmd->supported |= (SUPPORTED_1000baseT_Half |
                                   SUPPORTED_1000baseT_Full);

        /* Copper PHYs also do 10/100 over TP; serdes is fibre-only. */
        if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
                cmd->supported |= (SUPPORTED_100baseT_Half |
                                  SUPPORTED_100baseT_Full |
                                  SUPPORTED_10baseT_Half |
                                  SUPPORTED_10baseT_Full |
                                  SUPPORTED_TP);
                cmd->port = PORT_TP;
        } else {
                cmd->supported |= SUPPORTED_FIBRE;
                cmd->port = PORT_FIBRE;
        }

        cmd->advertising = tp->link_config.advertising;
        if (tg3_flag(tp, PAUSE_AUTONEG)) {
                /* Map the flowctrl bits onto the standard pause
                 * advertisement encoding (Pause / Asym_Pause).
                 */
                if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
                        if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
                                cmd->advertising |= ADVERTISED_Pause;
                        } else {
                                cmd->advertising |= ADVERTISED_Pause |
                                                    ADVERTISED_Asym_Pause;
                        }
                } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
                        cmd->advertising |= ADVERTISED_Asym_Pause;
                }
        }
        if (netif_running(dev) && netif_carrier_ok(dev)) {
                ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
                cmd->duplex = tp->link_config.active_duplex;
                cmd->lp_advertising = tp->link_config.rmt_adv;
                /* MDI/MDI-X status is only meaningful on copper links. */
                if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
                        if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
                                cmd->eth_tp_mdix = ETH_TP_MDI_X;
                        else
                                cmd->eth_tp_mdix = ETH_TP_MDI;
                }
        } else {
                /* No carrier: report unknown speed/duplex. */
                ethtool_cmd_speed_set(cmd, SPEED_INVALID);
                cmd->duplex = DUPLEX_INVALID;
                cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
        }
        cmd->phy_address = tp->phy_addr;
        cmd->transceiver = XCVR_INTERNAL;
        cmd->autoneg = tp->link_config.autoneg;
        cmd->maxtxpkt = 0;
        cmd->maxrxpkt = 0;
        return 0;
}
10541
/* ethtool set_settings hook: validate and apply the requested link
 * configuration (autoneg, speed, duplex, advertisement).
 *
 * When phylib manages the PHY the request is delegated to
 * phy_ethtool_sset().  Otherwise the parameters are validated against
 * the PHY's capabilities, stored in tp->link_config, and the link is
 * renegotiated if the interface is up.
 *
 * Returns 0 on success, -EINVAL for unsupported combinations, or
 * -EAGAIN if the phylib PHY is not connected yet.
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct tg3 *tp = netdev_priv(dev);
        u32 speed = ethtool_cmd_speed(cmd);

        if (tg3_flag(tp, USE_PHYLIB)) {
                struct phy_device *phydev;
                if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                        return -EAGAIN;
                phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
                return phy_ethtool_sset(phydev, cmd);
        }

        if (cmd->autoneg != AUTONEG_ENABLE &&
            cmd->autoneg != AUTONEG_DISABLE)
                return -EINVAL;

        /* Forced mode requires an explicit, valid duplex setting. */
        if (cmd->autoneg == AUTONEG_DISABLE &&
            cmd->duplex != DUPLEX_FULL &&
            cmd->duplex != DUPLEX_HALF)
                return -EINVAL;

        if (cmd->autoneg == AUTONEG_ENABLE) {
                /* Build the mask of modes this PHY can advertise ... */
                u32 mask = ADVERTISED_Autoneg |
                           ADVERTISED_Pause |
                           ADVERTISED_Asym_Pause;

                if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
                        mask |= ADVERTISED_1000baseT_Half |
                                ADVERTISED_1000baseT_Full;

                if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
                        mask |= ADVERTISED_100baseT_Half |
                                ADVERTISED_100baseT_Full |
                                ADVERTISED_10baseT_Half |
                                ADVERTISED_10baseT_Full |
                                ADVERTISED_TP;
                else
                        mask |= ADVERTISED_FIBRE;

                /* ... and reject anything outside it. */
                if (cmd->advertising & ~mask)
                        return -EINVAL;

                /* Keep only the speed/duplex bits for the stored config. */
                mask &= (ADVERTISED_1000baseT_Half |
                         ADVERTISED_1000baseT_Full |
                         ADVERTISED_100baseT_Half |
                         ADVERTISED_100baseT_Full |
                         ADVERTISED_10baseT_Half |
                         ADVERTISED_10baseT_Full);

                cmd->advertising &= mask;
        } else {
                /* Forced mode: serdes only supports 1000/full. */
                if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
                        if (speed != SPEED_1000)
                                return -EINVAL;

                        if (cmd->duplex != DUPLEX_FULL)
                                return -EINVAL;
                } else {
                        if (speed != SPEED_100 &&
                            speed != SPEED_10)
                                return -EINVAL;
                }
        }

        tg3_full_lock(tp, 0);

        tp->link_config.autoneg = cmd->autoneg;
        if (cmd->autoneg == AUTONEG_ENABLE) {
                tp->link_config.advertising = (cmd->advertising |
                                              ADVERTISED_Autoneg);
                /* Autoneg decides speed/duplex; mark them unknown. */
                tp->link_config.speed = SPEED_INVALID;
                tp->link_config.duplex = DUPLEX_INVALID;
        } else {
                tp->link_config.advertising = 0;
                tp->link_config.speed = speed;
                tp->link_config.duplex = cmd->duplex;
        }

        /* Remember the requested config so it survives resets. */
        tp->link_config.orig_speed = tp->link_config.speed;
        tp->link_config.orig_duplex = tp->link_config.duplex;
        tp->link_config.orig_autoneg = tp->link_config.autoneg;

        if (netif_running(dev))
                tg3_setup_phy(tp, 1);

        tg3_full_unlock(tp);

        return 0;
}
10632
10633 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10634 {
10635         struct tg3 *tp = netdev_priv(dev);
10636
10637         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
10638         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
10639         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
10640         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
10641 }
10642
10643 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10644 {
10645         struct tg3 *tp = netdev_priv(dev);
10646
10647         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10648                 wol->supported = WAKE_MAGIC;
10649         else
10650                 wol->supported = 0;
10651         wol->wolopts = 0;
10652         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10653                 wol->wolopts = WAKE_MAGIC;
10654         memset(&wol->sopass, 0, sizeof(wol->sopass));
10655 }
10656
10657 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10658 {
10659         struct tg3 *tp = netdev_priv(dev);
10660         struct device *dp = &tp->pdev->dev;
10661
10662         if (wol->wolopts & ~WAKE_MAGIC)
10663                 return -EINVAL;
10664         if ((wol->wolopts & WAKE_MAGIC) &&
10665             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10666                 return -EINVAL;
10667
10668         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10669
10670         spin_lock_bh(&tp->lock);
10671         if (device_may_wakeup(dp))
10672                 tg3_flag_set(tp, WOL_ENABLE);
10673         else
10674                 tg3_flag_clear(tp, WOL_ENABLE);
10675         spin_unlock_bh(&tp->lock);
10676
10677         return 0;
10678 }
10679
10680 static u32 tg3_get_msglevel(struct net_device *dev)
10681 {
10682         struct tg3 *tp = netdev_priv(dev);
10683         return tp->msg_enable;
10684 }
10685
10686 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10687 {
10688         struct tg3 *tp = netdev_priv(dev);
10689         tp->msg_enable = value;
10690 }
10691
/* ethtool nway_reset hook: restart link autonegotiation.
 *
 * Delegated to phylib when it owns the PHY.  Otherwise the BMCR is
 * re-read under the driver lock and autoneg is restarted only if it is
 * currently enabled (or parallel-detect is active); restarting a forced
 * link would make no sense.
 *
 * Returns 0 on success, -EAGAIN if the device is down, -EINVAL for
 * serdes PHYs or when autoneg is not enabled.
 */
static int tg3_nway_reset(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        int r;

        if (!netif_running(dev))
                return -EAGAIN;

        if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
                return -EINVAL;

        if (tg3_flag(tp, USE_PHYLIB)) {
                if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                        return -EAGAIN;
                r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
        } else {
                u32 bmcr;

                spin_lock_bh(&tp->lock);
                r = -EINVAL;
                /* NOTE(review): BMCR is read twice; the first read looks
                 * like a deliberate dummy read (some PHYs return stale
                 * data on the first access) — confirm before removing.
                 */
                tg3_readphy(tp, MII_BMCR, &bmcr);
                if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
                    ((bmcr & BMCR_ANENABLE) ||
                     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
                        tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
                                                   BMCR_ANENABLE);
                        r = 0;
                }
                spin_unlock_bh(&tp->lock);
        }

        return r;
}
10725
10726 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10727 {
10728         struct tg3 *tp = netdev_priv(dev);
10729
10730         ering->rx_max_pending = tp->rx_std_ring_mask;
10731         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10732                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10733         else
10734                 ering->rx_jumbo_max_pending = 0;
10735
10736         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10737
10738         ering->rx_pending = tp->rx_pending;
10739         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10740                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10741         else
10742                 ering->rx_jumbo_pending = 0;
10743
10744         ering->tx_pending = tp->napi[0].tx_pending;
10745 }
10746
/* ethtool set_ringparam hook: resize the RX/TX rings.
 *
 * If the interface is running, traffic is quiesced, the new sizes are
 * recorded, and the hardware is halted and restarted so the rings are
 * reallocated.  TX rings must be larger than MAX_SKB_FRAGS (3x on
 * TSO_BUG chips) so a maximally-fragmented skb always fits.
 *
 * Returns 0 on success, -EINVAL for out-of-range sizes, or the error
 * from tg3_restart_hw().
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
        struct tg3 *tp = netdev_priv(dev);
        int i, irq_sync = 0, err = 0;

        if ((ering->rx_pending > tp->rx_std_ring_mask) ||
            (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
            (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
            (ering->tx_pending <= MAX_SKB_FRAGS) ||
            (tg3_flag(tp, TSO_BUG) &&
             (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
                return -EINVAL;

        if (netif_running(dev)) {
                tg3_phy_stop(tp);
                tg3_netif_stop(tp);
                irq_sync = 1;
        }

        tg3_full_lock(tp, irq_sync);

        tp->rx_pending = ering->rx_pending;

        /* Some chips cannot handle more than 64 standard RX BDs. */
        if (tg3_flag(tp, MAX_RXPEND_64) &&
            tp->rx_pending > 63)
                tp->rx_pending = 63;
        tp->rx_jumbo_pending = ering->rx_jumbo_pending;

        /* Every TX queue gets the same ring size. */
        for (i = 0; i < tp->irq_max; i++)
                tp->napi[i].tx_pending = ering->tx_pending;

        if (netif_running(dev)) {
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                err = tg3_restart_hw(tp, 1);
                if (!err)
                        tg3_netif_start(tp);
        }

        tg3_full_unlock(tp);

        /* Restart the PHY only if we stopped it and the restart worked. */
        if (irq_sync && !err)
                tg3_phy_start(tp);

        return err;
}
10792
10793 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10794 {
10795         struct tg3 *tp = netdev_priv(dev);
10796
10797         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10798
10799         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
10800                 epause->rx_pause = 1;
10801         else
10802                 epause->rx_pause = 0;
10803
10804         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
10805                 epause->tx_pause = 1;
10806         else
10807                 epause->tx_pause = 0;
10808 }
10809
/* ethtool set_pauseparam hook: configure RX/TX flow control.
 *
 * Two paths:
 *  - phylib: translate rx/tx pause into the Pause/Asym_Pause
 *    advertisement bits, update the phydev and renegotiate (or force
 *    the setting when autoneg is off).
 *  - legacy: update tp->link_config.flowctrl and, if the interface is
 *    up, halt and restart the hardware so the new setting takes effect.
 *
 * Returns 0 on success, -EINVAL for combinations the PHY cannot
 * advertise, or the error from renegotiation / hardware restart.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
        struct tg3 *tp = netdev_priv(dev);
        int err = 0;

        if (tg3_flag(tp, USE_PHYLIB)) {
                u32 newadv;
                struct phy_device *phydev;

                phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

                /* Asymmetric pause needs Asym_Pause support in the PHY. */
                if (!(phydev->supported & SUPPORTED_Pause) ||
                    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
                     (epause->rx_pause != epause->tx_pause)))
                        return -EINVAL;

                /* Encode rx/tx pause into the standard advertisement
                 * bits (see IEEE 802.3 Annex 28B pause resolution).
                 */
                tp->link_config.flowctrl = 0;
                if (epause->rx_pause) {
                        tp->link_config.flowctrl |= FLOW_CTRL_RX;

                        if (epause->tx_pause) {
                                tp->link_config.flowctrl |= FLOW_CTRL_TX;
                                newadv = ADVERTISED_Pause;
                        } else
                                newadv = ADVERTISED_Pause |
                                         ADVERTISED_Asym_Pause;
                } else if (epause->tx_pause) {
                        tp->link_config.flowctrl |= FLOW_CTRL_TX;
                        newadv = ADVERTISED_Asym_Pause;
                } else
                        newadv = 0;

                if (epause->autoneg)
                        tg3_flag_set(tp, PAUSE_AUTONEG);
                else
                        tg3_flag_clear(tp, PAUSE_AUTONEG);

                if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
                        u32 oldadv = phydev->advertising &
                                     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
                        if (oldadv != newadv) {
                                phydev->advertising &=
                                        ~(ADVERTISED_Pause |
                                          ADVERTISED_Asym_Pause);
                                phydev->advertising |= newadv;
                                if (phydev->autoneg) {
                                        /*
                                         * Always renegotiate the link to
                                         * inform our link partner of our
                                         * flow control settings, even if the
                                         * flow control is forced.  Let
                                         * tg3_adjust_link() do the final
                                         * flow control setup.
                                         */
                                        return phy_start_aneg(phydev);
                                }
                        }

                        if (!epause->autoneg)
                                tg3_setup_flow_control(tp, 0, 0);
                } else {
                        /* PHY not connected yet: stash the advertisement
                         * so it is applied when the PHY comes up.
                         */
                        tp->link_config.orig_advertising &=
                                        ~(ADVERTISED_Pause |
                                          ADVERTISED_Asym_Pause);
                        tp->link_config.orig_advertising |= newadv;
                }
        } else {
                int irq_sync = 0;

                if (netif_running(dev)) {
                        tg3_netif_stop(tp);
                        irq_sync = 1;
                }

                tg3_full_lock(tp, irq_sync);

                if (epause->autoneg)
                        tg3_flag_set(tp, PAUSE_AUTONEG);
                else
                        tg3_flag_clear(tp, PAUSE_AUTONEG);
                if (epause->rx_pause)
                        tp->link_config.flowctrl |= FLOW_CTRL_RX;
                else
                        tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
                if (epause->tx_pause)
                        tp->link_config.flowctrl |= FLOW_CTRL_TX;
                else
                        tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

                /* Restart the chip so the MAC picks up the new mode. */
                if (netif_running(dev)) {
                        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                        err = tg3_restart_hw(tp, 1);
                        if (!err)
                                tg3_netif_start(tp);
                }

                tg3_full_unlock(tp);
        }

        return err;
}
10911
10912 static int tg3_get_sset_count(struct net_device *dev, int sset)
10913 {
10914         switch (sset) {
10915         case ETH_SS_TEST:
10916                 return TG3_NUM_TEST;
10917         case ETH_SS_STATS:
10918                 return TG3_NUM_STATS;
10919         default:
10920                 return -EOPNOTSUPP;
10921         }
10922 }
10923
10924 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
10925                          u32 *rules __always_unused)
10926 {
10927         struct tg3 *tp = netdev_priv(dev);
10928
10929         if (!tg3_flag(tp, SUPPORT_MSIX))
10930                 return -EOPNOTSUPP;
10931
10932         switch (info->cmd) {
10933         case ETHTOOL_GRXRINGS:
10934                 if (netif_running(tp->dev))
10935                         info->data = tp->irq_cnt;
10936                 else {
10937                         info->data = num_online_cpus();
10938                         if (info->data > TG3_IRQ_MAX_VECS_RSS)
10939                                 info->data = TG3_IRQ_MAX_VECS_RSS;
10940                 }
10941
10942                 /* The first interrupt vector only
10943                  * handles link interrupts.
10944                  */
10945                 info->data -= 1;
10946                 return 0;
10947
10948         default:
10949                 return -EOPNOTSUPP;
10950         }
10951 }
10952
10953 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
10954 {
10955         u32 size = 0;
10956         struct tg3 *tp = netdev_priv(dev);
10957
10958         if (tg3_flag(tp, SUPPORT_MSIX))
10959                 size = TG3_RSS_INDIR_TBL_SIZE;
10960
10961         return size;
10962 }
10963
10964 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
10965 {
10966         struct tg3 *tp = netdev_priv(dev);
10967         int i;
10968
10969         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10970                 indir[i] = tp->rss_ind_tbl[i];
10971
10972         return 0;
10973 }
10974
10975 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
10976 {
10977         struct tg3 *tp = netdev_priv(dev);
10978         size_t i;
10979
10980         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10981                 tp->rss_ind_tbl[i] = indir[i];
10982
10983         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
10984                 return 0;
10985
10986         /* It is legal to write the indirection
10987          * table while the device is running.
10988          */
10989         tg3_full_lock(tp, 0);
10990         tg3_rss_write_indir_tbl(tp);
10991         tg3_full_unlock(tp);
10992
10993         return 0;
10994 }
10995
10996 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10997 {
10998         switch (stringset) {
10999         case ETH_SS_STATS:
11000                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11001                 break;
11002         case ETH_SS_TEST:
11003                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11004                 break;
11005         default:
11006                 WARN_ON(1);     /* we need a WARN() */
11007                 break;
11008         }
11009 }
11010
11011 static int tg3_set_phys_id(struct net_device *dev,
11012                             enum ethtool_phys_id_state state)
11013 {
11014         struct tg3 *tp = netdev_priv(dev);
11015
11016         if (!netif_running(tp->dev))
11017                 return -EAGAIN;
11018
11019         switch (state) {
11020         case ETHTOOL_ID_ACTIVE:
11021                 return 1;       /* cycle on/off once per second */
11022
11023         case ETHTOOL_ID_ON:
11024                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11025                      LED_CTRL_1000MBPS_ON |
11026                      LED_CTRL_100MBPS_ON |
11027                      LED_CTRL_10MBPS_ON |
11028                      LED_CTRL_TRAFFIC_OVERRIDE |
11029                      LED_CTRL_TRAFFIC_BLINK |
11030                      LED_CTRL_TRAFFIC_LED);
11031                 break;
11032
11033         case ETHTOOL_ID_OFF:
11034                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11035                      LED_CTRL_TRAFFIC_OVERRIDE);
11036                 break;
11037
11038         case ETHTOOL_ID_INACTIVE:
11039                 tw32(MAC_LED_CTRL, tp->led_ctrl);
11040                 break;
11041         }
11042
11043         return 0;
11044 }
11045
11046 static void tg3_get_ethtool_stats(struct net_device *dev,
11047                                    struct ethtool_stats *estats, u64 *tmp_stats)
11048 {
11049         struct tg3 *tp = netdev_priv(dev);
11050
11051         if (tp->hw_stats)
11052                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11053         else
11054                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11055 }
11056
/* Read the device's VPD (Vital Product Data) block.
 *
 * For a standard-format EEPROM image (magic == TG3_EEPROM_MAGIC) the
 * NVRAM directory is searched for an extended-VPD entry; if found,
 * that entry's length and translated offset are used, otherwise the
 * legacy fixed VPD region is read.  For any other magic value the
 * data is fetched through the PCI VPD capability instead.
 *
 * Returns a kmalloc'd buffer that the caller must kfree(), with
 * *vpdlen set to its length in bytes, or NULL on any failure.
 */
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		/* Walk the NVRAM directory looking for an extended-VPD
		 * entry; `offset` stops at the matching entry, or at
		 * TG3_NVM_DIR_END if none exists.
		 */
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			/* Entry found: length is stored in 4-byte units;
			 * the second word of the entry holds the data
			 * offset, which must be translated to a logical
			 * NVRAM address.
			 */
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	/* No (or empty) extended-VPD entry: fall back to the fixed
	 * legacy VPD region.
	 */
	if (!offset || !len) {
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		/* Non-EEPROM image: read through the PCI VPD capability.
		 * Timeouts/interrupts are retried, but at most 3 read
		 * attempts are made in total.
		 */
		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}
11132
#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
#define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
#define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
#define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c

/* NVRAM self-test, dispatched on the image's magic number:
 *  - selfboot "FW" images: 8-bit additive checksum over the image
 *    (revision 2 skips the 4-byte MBA field);
 *  - selfboot "HW" images: per-byte parity check;
 *  - standard EEPROM images: CRC of the bootstrap and manufacturing
 *    blocks, then the VPD read-only section's RV checksum.
 * Returns 0 on success, -EIO on a verification failure, -ENOMEM if a
 * buffer cannot be allocated.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic, len;
	__be32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	/* Pick the number of bytes to verify from the image format. */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_4:
				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_5:
				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_6:
				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
				break;
			default:
				return -EIO;
			}
		} else
			return 0;	/* unknown selfboot format: don't fail */
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* Pull the region to be verified into memory. */
	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		/* 8-bit additive checksum over the image must be zero. */
		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				/* Bytes 0 and 8 each carry 7 parity bits. */
				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			} else if (i == 16) {
				int l;
				u8 msk;

				/* Byte 16 carries 6 parity bits ... */
				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				/* ... and byte 17 carries 8 more. */
				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		/* Each data byte must fail neither check below, i.e.
		 * the parity bit must be set exactly when the byte's
		 * population count is even (odd overall parity).
		 */
		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	/* Finally verify the VPD read-only section's RV checksum:
	 * all bytes from the start of the block through the checksum
	 * byte itself must sum to zero (mod 256).
	 */
	buf = tg3_vpd_readblock(tp, &len);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}
11321
11322 #define TG3_SERDES_TIMEOUT_SEC  2
11323 #define TG3_COPPER_TIMEOUT_SEC  6
11324
11325 static int tg3_test_link(struct tg3 *tp)
11326 {
11327         int i, max;
11328
11329         if (!netif_running(tp->dev))
11330                 return -ENODEV;
11331
11332         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11333                 max = TG3_SERDES_TIMEOUT_SEC;
11334         else
11335                 max = TG3_COPPER_TIMEOUT_SEC;
11336
11337         for (i = 0; i < max; i++) {
11338                 if (netif_carrier_ok(tp->dev))
11339                         return 0;
11340
11341                 if (msleep_interruptible(1000))
11342                         break;
11343         }
11344
11345         return -EIO;
11346 }
11347
/* Only test the commonly used registers.
 *
 * For each applicable table entry: save the register, write all-zeros
 * and then all-ones (through read_mask | write_mask), verifying after
 * each write that the read-only bits kept their saved value and the
 * read/write bits took the written value, then restore the original
 * contents.  Entries are filtered by the chip-family flags.  Returns
 * 0 on success or -EIO on the first mismatch (after restoring the
 * register).
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;	/* chip-family applicability, below */
#define TG3_FL_5705     0x1
#define TG3_FL_NOT_5705 0x2
#define TG3_FL_NOT_5788 0x4
#define TG3_FL_NOT_5750 0x8
		u32 read_mask;	/* read-only bits expected to be stable */
		u32 write_mask;	/* bits expected to be writable */
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		/* terminator */
		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries that don't apply to this chip family. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
11568
11569 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11570 {
11571         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11572         int i;
11573         u32 j;
11574
11575         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11576                 for (j = 0; j < len; j += 4) {
11577                         u32 val;
11578
11579                         tg3_write_mem(tp, offset + j, test_pattern[i]);
11580                         tg3_read_mem(tp, offset + j, &val);
11581                         if (val != test_pattern[i])
11582                                 return -EIO;
11583                 }
11584         }
11585         return 0;
11586 }
11587
/* NIC-internal memory self-test.  Selects the offset/length table
 * matching the chip generation and pattern-tests each region with
 * tg3_do_mem_test().  Every table is terminated by an entry whose
 * offset is 0xffffffff.  Returns 0 on success or the first non-zero
 * error from the pattern test.
 */
static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;	/* region start in NIC address space */
		u32 len;	/* region length in bytes */
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5717[] = {
		{ 0x00000200, 0x00008},
		{ 0x00010000, 0x0a000},
		{ 0x00020000, 0x13c00},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_57765[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x09800},
		{ 0x00010000, 0x0a000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	/* Most specific chip class first. */
	if (tg3_flag(tp, 5717_PLUS))
		mem_tbl = mem_tbl_5717;
	else if (tg3_flag(tp, 57765_CLASS))
		mem_tbl = mem_tbl_57765;
	else if (tg3_flag(tp, 5755_PLUS))
		mem_tbl = mem_tbl_5755;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tg3_flag(tp, 5705_PLUS))
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
		if (err)
			break;
	}

	return err;
}
11656
#define TG3_TSO_MSS             500

#define TG3_TSO_IP_HDR_LEN      20
#define TG3_TSO_TCP_HDR_LEN     20
#define TG3_TSO_TCP_OPT_LEN     12

/* Canned IPv4/TCP header used to build the TSO loopback test frame:
 * 2-byte ethertype, 20-byte IP header, 20-byte TCP header plus 12
 * bytes of TCP options.  Fields patched at runtime (e.g. the IP total
 * length) are left zero here.
 */
static const u8 tg3_tso_header[] = {
0x08, 0x00,				/* ethertype: IPv4 */
0x45, 0x00, 0x00, 0x00,			/* IP: ver/ihl, tos, tot_len (patched) */
0x00, 0x00, 0x40, 0x00,			/* IP: id, frag (DF set) */
0x40, 0x06, 0x00, 0x00,			/* IP: ttl 64, proto TCP, csum 0 */
0x0a, 0x00, 0x00, 0x01,			/* IP: saddr 10.0.0.1 */
0x0a, 0x00, 0x00, 0x02,			/* IP: daddr 10.0.0.2 */
0x0d, 0x00, 0xe0, 0x00,			/* TCP: source, dest ports */
0x00, 0x00, 0x01, 0x00,			/* TCP: sequence number */
0x00, 0x00, 0x02, 0x00,			/* TCP: ack number */
0x80, 0x10, 0x10, 0x00,			/* TCP: 32-byte data offset, ACK, window */
0x14, 0x09, 0x00, 0x00,			/* TCP: checksum, urgent ptr */
0x01, 0x01, 0x08, 0x0a,			/* TCP opts: NOP, NOP, timestamp */
0x11, 0x11, 0x11, 0x11,			/* TCP opts: timestamp value */
0x11, 0x11, 0x11, 0x11,			/* TCP opts: timestamp echo */
};
11679
11680 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11681 {
11682         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11683         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11684         u32 budget;
11685         struct sk_buff *skb;
11686         u8 *tx_data, *rx_data;
11687         dma_addr_t map;
11688         int num_pkts, tx_len, rx_len, i, err;
11689         struct tg3_rx_buffer_desc *desc;
11690         struct tg3_napi *tnapi, *rnapi;
11691         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11692
11693         tnapi = &tp->napi[0];
11694         rnapi = &tp->napi[0];
11695         if (tp->irq_cnt > 1) {
11696                 if (tg3_flag(tp, ENABLE_RSS))
11697                         rnapi = &tp->napi[1];
11698                 if (tg3_flag(tp, ENABLE_TSS))
11699                         tnapi = &tp->napi[1];
11700         }
11701         coal_now = tnapi->coal_now | rnapi->coal_now;
11702
11703         err = -EIO;
11704
11705         tx_len = pktsz;
11706         skb = netdev_alloc_skb(tp->dev, tx_len);
11707         if (!skb)
11708                 return -ENOMEM;
11709
11710         tx_data = skb_put(skb, tx_len);
11711         memcpy(tx_data, tp->dev->dev_addr, 6);
11712         memset(tx_data + 6, 0x0, 8);
11713
11714         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11715
11716         if (tso_loopback) {
11717                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11718
11719                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11720                               TG3_TSO_TCP_OPT_LEN;
11721
11722                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11723                        sizeof(tg3_tso_header));
11724                 mss = TG3_TSO_MSS;
11725
11726                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11727                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11728
11729                 /* Set the total length field in the IP header */
11730                 iph->tot_len = htons((u16)(mss + hdr_len));
11731
11732                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11733                               TXD_FLAG_CPU_POST_DMA);
11734
11735                 if (tg3_flag(tp, HW_TSO_1) ||
11736                     tg3_flag(tp, HW_TSO_2) ||
11737                     tg3_flag(tp, HW_TSO_3)) {
11738                         struct tcphdr *th;
11739                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11740                         th = (struct tcphdr *)&tx_data[val];
11741                         th->check = 0;
11742                 } else
11743                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
11744
11745                 if (tg3_flag(tp, HW_TSO_3)) {
11746                         mss |= (hdr_len & 0xc) << 12;
11747                         if (hdr_len & 0x10)
11748                                 base_flags |= 0x00000010;
11749                         base_flags |= (hdr_len & 0x3e0) << 5;
11750                 } else if (tg3_flag(tp, HW_TSO_2))
11751                         mss |= hdr_len << 9;
11752                 else if (tg3_flag(tp, HW_TSO_1) ||
11753                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11754                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11755                 } else {
11756                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11757                 }
11758
11759                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11760         } else {
11761                 num_pkts = 1;
11762                 data_off = ETH_HLEN;
11763         }
11764
11765         for (i = data_off; i < tx_len; i++)
11766                 tx_data[i] = (u8) (i & 0xff);
11767
11768         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11769         if (pci_dma_mapping_error(tp->pdev, map)) {
11770                 dev_kfree_skb(skb);
11771                 return -EIO;
11772         }
11773
11774         val = tnapi->tx_prod;
11775         tnapi->tx_buffers[val].skb = skb;
11776         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11777
11778         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11779                rnapi->coal_now);
11780
11781         udelay(10);
11782
11783         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11784
11785         budget = tg3_tx_avail(tnapi);
11786         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11787                             base_flags | TXD_FLAG_END, mss, 0)) {
11788                 tnapi->tx_buffers[val].skb = NULL;
11789                 dev_kfree_skb(skb);
11790                 return -EIO;
11791         }
11792
11793         tnapi->tx_prod++;
11794
11795         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11796         tr32_mailbox(tnapi->prodmbox);
11797
11798         udelay(10);
11799
11800         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
11801         for (i = 0; i < 35; i++) {
11802                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11803                        coal_now);
11804
11805                 udelay(10);
11806
11807                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11808                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11809                 if ((tx_idx == tnapi->tx_prod) &&
11810                     (rx_idx == (rx_start_idx + num_pkts)))
11811                         break;
11812         }
11813
11814         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
11815         dev_kfree_skb(skb);
11816
11817         if (tx_idx != tnapi->tx_prod)
11818                 goto out;
11819
11820         if (rx_idx != rx_start_idx + num_pkts)
11821                 goto out;
11822
11823         val = data_off;
11824         while (rx_idx != rx_start_idx) {
11825                 desc = &rnapi->rx_rcb[rx_start_idx++];
11826                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11827                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11828
11829                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11830                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11831                         goto out;
11832
11833                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11834                          - ETH_FCS_LEN;
11835
11836                 if (!tso_loopback) {
11837                         if (rx_len != tx_len)
11838                                 goto out;
11839
11840                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11841                                 if (opaque_key != RXD_OPAQUE_RING_STD)
11842                                         goto out;
11843                         } else {
11844                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11845                                         goto out;
11846                         }
11847                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11848                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11849                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
11850                         goto out;
11851                 }
11852
11853                 if (opaque_key == RXD_OPAQUE_RING_STD) {
11854                         rx_data = tpr->rx_std_buffers[desc_idx].data;
11855                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11856                                              mapping);
11857                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11858                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
11859                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11860                                              mapping);
11861                 } else
11862                         goto out;
11863
11864                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11865                                             PCI_DMA_FROMDEVICE);
11866
11867                 rx_data += TG3_RX_OFFSET(tp);
11868                 for (i = data_off; i < rx_len; i++, val++) {
11869                         if (*(rx_data + i) != (u8) (val & 0xff))
11870                                 goto out;
11871                 }
11872         }
11873
11874         err = 0;
11875
11876         /* tg3_free_rings will unmap and free the rx_data */
11877 out:
11878         return err;
11879 }
11880
/* Per-sub-test failure bits OR'd into the u64 result slots filled by
 * tg3_test_loopback(): standard-MTU frame, jumbo frame, and TSO loopback.
 * TG3_LOOPBACK_FAILED marks all three at once.
 */
#define TG3_STD_LOOPBACK_FAILED         1
#define TG3_JMB_LOOPBACK_FAILED         2
#define TG3_TSO_LOOPBACK_FAILED         4
#define TG3_LOOPBACK_FAILED \
        (TG3_STD_LOOPBACK_FAILED | \
         TG3_JMB_LOOPBACK_FAILED | \
         TG3_TSO_LOOPBACK_FAILED)
11888
/* Run the MAC-internal, PHY-internal and (optionally) external loopback
 * self tests.  data[0], data[1] and data[2] collect TG3_*_LOOPBACK_FAILED
 * bits for the MAC, PHY and external tests respectively; the caller is
 * expected to have zeroed them (tg3_self_test() does).  Returns 0 when all
 * attempted sub-tests pass, -EIO (or the tg3_reset_hw() error) otherwise.
 * Called from tg3_self_test() with the device halted under tg3_full_lock().
 */
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
	int err = -EIO;
	u32 eee_cap;

	/* Temporarily drop the EEE capability flag for the duration of the
	 * test; it is restored at "done" below.
	 */
	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	if (!netif_running(tp->dev)) {
		/* Device is down: mark every attempted mode as failed. */
		data[0] = TG3_LOOPBACK_FAILED;
		data[1] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[2] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	err = tg3_reset_hw(tp, 1);
	if (err) {
		data[0] = TG3_LOOPBACK_FAILED;
		data[1] = TG3_LOOPBACK_FAILED;
		if (do_extlpbk)
			data[2] = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* HW errata - mac loopback fails in some cases on 5780.
	 * Normal traffic and PHY loopback are not affected by
	 * errata.  Also, the MAC loopback test is deprecated for
	 * all newer ASIC revisions.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT)) {
		tg3_mac_loopback(tp, true);

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[0] |= TG3_STD_LOOPBACK_FAILED;

		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
			data[0] |= TG3_JMB_LOOPBACK_FAILED;

		tg3_mac_loopback(tp, false);
	}

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		int i;

		tg3_phy_lpbk_set(tp, 0, false);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}

		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
			data[1] |= TG3_STD_LOOPBACK_FAILED;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
			data[1] |= TG3_TSO_LOOPBACK_FAILED;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
			data[1] |= TG3_JMB_LOOPBACK_FAILED;

		if (do_extlpbk) {
			tg3_phy_lpbk_set(tp, 0, true);

			/* All link indications report up, but the hardware
			 * isn't really ready for about 20 msec.  Double it
			 * to be sure.
			 */
			mdelay(40);

			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
				data[2] |= TG3_STD_LOOPBACK_FAILED;
			if (tg3_flag(tp, TSO_CAPABLE) &&
			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
				data[2] |= TG3_TSO_LOOPBACK_FAILED;
			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
			    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
				data[2] |= TG3_JMB_LOOPBACK_FAILED;
		}

		/* Re-enable gphy autopowerdown. */
		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
			tg3_phy_toggle_apd(tp, true);
	}

	/* Note: data[2] is read even when !do_extlpbk; it relies on the
	 * caller having zeroed the slots beforehand.
	 */
	err = (data[0] | data[1] | data[2]) ? -EIO : 0;

done:
	tp->phy_flags |= eee_cap;

	return err;
}
11995
/* ethtool self-test handler.  Fills one result slot per test
 * (0 = pass, nonzero = fail):
 *   data[0] nvram, data[1] link, data[2] registers, data[3] memory,
 *   data[4..6] loopback (see tg3_test_loopback()), data[7] interrupt.
 * The offline tests halt the chip and restart it afterwards.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	/* Wake the chip first; if power-up fails, fail every test. */
	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
	    tg3_power_up(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
		return;
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	/* The link test is skipped for external-loopback runs. */
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Quiesce the chip and its on-board CPUs before poking
		 * registers/memory directly.
		 */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		if (tg3_test_loopback(tp, &data[4], doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		/* The interrupt test runs with the full lock released. */
		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[7] = 1;
		}

		tg3_full_lock(tp, 0);

		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down(tp);

}
12083
12084 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12085 {
12086         struct mii_ioctl_data *data = if_mii(ifr);
12087         struct tg3 *tp = netdev_priv(dev);
12088         int err;
12089
12090         if (tg3_flag(tp, USE_PHYLIB)) {
12091                 struct phy_device *phydev;
12092                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12093                         return -EAGAIN;
12094                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12095                 return phy_mii_ioctl(phydev, ifr, cmd);
12096         }
12097
12098         switch (cmd) {
12099         case SIOCGMIIPHY:
12100                 data->phy_id = tp->phy_addr;
12101
12102                 /* fallthru */
12103         case SIOCGMIIREG: {
12104                 u32 mii_regval;
12105
12106                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12107                         break;                  /* We have no PHY */
12108
12109                 if (!netif_running(dev))
12110                         return -EAGAIN;
12111
12112                 spin_lock_bh(&tp->lock);
12113                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
12114                 spin_unlock_bh(&tp->lock);
12115
12116                 data->val_out = mii_regval;
12117
12118                 return err;
12119         }
12120
12121         case SIOCSMIIREG:
12122                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12123                         break;                  /* We have no PHY */
12124
12125                 if (!netif_running(dev))
12126                         return -EAGAIN;
12127
12128                 spin_lock_bh(&tp->lock);
12129                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
12130                 spin_unlock_bh(&tp->lock);
12131
12132                 return err;
12133
12134         default:
12135                 /* do nothing */
12136                 break;
12137         }
12138         return -EOPNOTSUPP;
12139 }
12140
12141 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12142 {
12143         struct tg3 *tp = netdev_priv(dev);
12144
12145         memcpy(ec, &tp->coal, sizeof(*ec));
12146         return 0;
12147 }
12148
12149 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12150 {
12151         struct tg3 *tp = netdev_priv(dev);
12152         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12153         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12154
12155         if (!tg3_flag(tp, 5705_PLUS)) {
12156                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12157                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12158                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12159                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
12160         }
12161
12162         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12163             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12164             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12165             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12166             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12167             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12168             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12169             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
12170             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
12171             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
12172                 return -EINVAL;
12173
12174         /* No rx interrupts will be generated if both are zero */
12175         if ((ec->rx_coalesce_usecs == 0) &&
12176             (ec->rx_max_coalesced_frames == 0))
12177                 return -EINVAL;
12178
12179         /* No tx interrupts will be generated if both are zero */
12180         if ((ec->tx_coalesce_usecs == 0) &&
12181             (ec->tx_max_coalesced_frames == 0))
12182                 return -EINVAL;
12183
12184         /* Only copy relevant parameters, ignore all others. */
12185         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
12186         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
12187         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
12188         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
12189         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12190         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12191         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12192         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12193         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12194
12195         if (netif_running(dev)) {
12196                 tg3_full_lock(tp, 0);
12197                 __tg3_set_coalesce(tp, &tp->coal);
12198                 tg3_full_unlock(tp);
12199         }
12200         return 0;
12201 }
12202
/* ethtool operations table for tg3 devices. */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
	.get_rxnfc		= tg3_get_rxnfc,
	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
	.get_rxfh_indir		= tg3_get_rxfh_indir,
	.set_rxfh_indir		= tg3_set_rxfh_indir,
};
12234
/* ndo_set_rx_mode: reprogram rx filters under the full lock; a no-op
 * while the interface is down.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
12246
12247 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
12248                                int new_mtu)
12249 {
12250         dev->mtu = new_mtu;
12251
12252         if (new_mtu > ETH_DATA_LEN) {
12253                 if (tg3_flag(tp, 5780_CLASS)) {
12254                         netdev_update_features(dev);
12255                         tg3_flag_clear(tp, TSO_CAPABLE);
12256                 } else {
12257                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
12258                 }
12259         } else {
12260                 if (tg3_flag(tp, 5780_CLASS)) {
12261                         tg3_flag_set(tp, TSO_CAPABLE);
12262                         netdev_update_features(dev);
12263                 }
12264                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
12265         }
12266 }
12267
12268 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
12269 {
12270         struct tg3 *tp = netdev_priv(dev);
12271         int err;
12272
12273         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
12274                 return -EINVAL;
12275
12276         if (!netif_running(dev)) {
12277                 /* We'll just catch it later when the
12278                  * device is up'd.
12279                  */
12280                 tg3_set_mtu(dev, tp, new_mtu);
12281                 return 0;
12282         }
12283
12284         tg3_phy_stop(tp);
12285
12286         tg3_netif_stop(tp);
12287
12288         tg3_full_lock(tp, 1);
12289
12290         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12291
12292         tg3_set_mtu(dev, tp, new_mtu);
12293
12294         err = tg3_restart_hw(tp, 0);
12295
12296         if (!err)
12297                 tg3_netif_start(tp);
12298
12299         tg3_full_unlock(tp);
12300
12301         if (!err)
12302                 tg3_phy_start(tp);
12303
12304         return err;
12305 }
12306
/* Network device entry points for tg3. */
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
12324
12325 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12326 {
12327         u32 cursize, val, magic;
12328
12329         tp->nvram_size = EEPROM_CHIP_SIZE;
12330
12331         if (tg3_nvram_read(tp, 0, &magic) != 0)
12332                 return;
12333
12334         if ((magic != TG3_EEPROM_MAGIC) &&
12335             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12336             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12337                 return;
12338
12339         /*
12340          * Size the chip by reading offsets at increasing powers of two.
12341          * When we encounter our validation signature, we know the addressing
12342          * has wrapped around, and thus have our chip size.
12343          */
12344         cursize = 0x10;
12345
12346         while (cursize < tp->nvram_size) {
12347                 if (tg3_nvram_read(tp, cursize, &val) != 0)
12348                         return;
12349
12350                 if (val == magic)
12351                         break;
12352
12353                 cursize <<= 1;
12354         }
12355
12356         tp->nvram_size = cursize;
12357 }
12358
12359 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12360 {
12361         u32 val;
12362
12363         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12364                 return;
12365
12366         /* Selfboot format */
12367         if (val != TG3_EEPROM_MAGIC) {
12368                 tg3_get_eeprom_size(tp);
12369                 return;
12370         }
12371
12372         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12373                 if (val != 0) {
12374                         /* This is confusing.  We want to operate on the
12375                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
12376                          * call will read from NVRAM and byteswap the data
12377                          * according to the byteswapping settings for all
12378                          * other register accesses.  This ensures the data we
12379                          * want will always reside in the lower 16-bits.
12380                          * However, the data in NVRAM is in LE format, which
12381                          * means the data from the NVRAM read will always be
12382                          * opposite the endianness of the CPU.  The 16-bit
12383                          * byteswap then brings the data to CPU endianness.
12384                          */
12385                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12386                         return;
12387                 }
12388         }
12389         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12390 }
12391
/* Probe NVRAM type for pre-5752 devices.  The 5750 and 5780-class parts
 * decode the vendor field of NVRAM_CFG1; all other parts default to a
 * buffered Atmel AT45DB0X1B.  Fills tp->nvram_jedecnum,
 * tp->nvram_pagesize and the NVRAM_BUFFERED/FLASH flags.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		/* Non-flash part: clear the compatibility-bypass bit
		 * (NOTE(review): presumably to route accesses through the
		 * NVRAM interface — confirm against the register spec).
		 */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
12442
12443 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12444 {
12445         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12446         case FLASH_5752PAGE_SIZE_256:
12447                 tp->nvram_pagesize = 256;
12448                 break;
12449         case FLASH_5752PAGE_SIZE_512:
12450                 tp->nvram_pagesize = 512;
12451                 break;
12452         case FLASH_5752PAGE_SIZE_1K:
12453                 tp->nvram_pagesize = 1024;
12454                 break;
12455         case FLASH_5752PAGE_SIZE_2K:
12456                 tp->nvram_pagesize = 2048;
12457                 break;
12458         case FLASH_5752PAGE_SIZE_4K:
12459                 tp->nvram_pagesize = 4096;
12460                 break;
12461         case FLASH_5752PAGE_SIZE_264:
12462                 tp->nvram_pagesize = 264;
12463                 break;
12464         case FLASH_5752PAGE_SIZE_528:
12465                 tp->nvram_pagesize = 528;
12466                 break;
12467         }
12468 }
12469
/* Probe NVRAM configuration for 5752 devices: decode the vendor field
 * of NVRAM_CFG1 into tp->nvram_jedecnum and the NVRAM_BUFFERED/FLASH
 * flags, then derive the page size — from the CFG1 page-size field for
 * flash parts, or the maximum EEPROM size otherwise.
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
12510
/* Probe NVRAM configuration for 5755 devices.  Like the 5752 variant,
 * but also derives tp->nvram_size from the exact part; when the TPM
 * protection bit is set, a smaller usable size is reported.
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	/* Reduce nvcfg1 to just the vendor field for the comparisons
	 * against specific part IDs below.
	 */
	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
12566
12567 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12568 {
12569         u32 nvcfg1;
12570
12571         nvcfg1 = tr32(NVRAM_CFG1);
12572
12573         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12574         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12575         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12576         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12577         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12578                 tp->nvram_jedecnum = JEDEC_ATMEL;
12579                 tg3_flag_set(tp, NVRAM_BUFFERED);
12580                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12581
12582                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12583                 tw32(NVRAM_CFG1, nvcfg1);
12584                 break;
12585         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12586         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12587         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12588         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12589                 tp->nvram_jedecnum = JEDEC_ATMEL;
12590                 tg3_flag_set(tp, NVRAM_BUFFERED);
12591                 tg3_flag_set(tp, FLASH);
12592                 tp->nvram_pagesize = 264;
12593                 break;
12594         case FLASH_5752VENDOR_ST_M45PE10:
12595         case FLASH_5752VENDOR_ST_M45PE20:
12596         case FLASH_5752VENDOR_ST_M45PE40:
12597                 tp->nvram_jedecnum = JEDEC_ST;
12598                 tg3_flag_set(tp, NVRAM_BUFFERED);
12599                 tg3_flag_set(tp, FLASH);
12600                 tp->nvram_pagesize = 256;
12601                 break;
12602         }
12603 }
12604
/* Probe NVRAM layout for 5761-class devices.  The vendor strapping
 * bits in NVRAM_CFG1 select the JEDEC vendor and page size; the total
 * size normally follows from the same straps, but when the TPM
 * protection bit is set the usable size is read from the
 * NVRAM_ADDR_LOCKOUT register instead.
 */
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		/* Atmel parts, 256-byte pages, no address translation. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		/* ST M45PExx parts, 256-byte pages. */
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		/* Usable size is limited by the TPM lockout boundary. */
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		/* Size follows directly from the part selected above. */
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
12679
/* Probe NVRAM layout for 5906-class devices: always a buffered Atmel
 * serial EEPROM.  As with the other EEPROM-backed parts in this file,
 * the page size is set to the whole chip size.
 */
static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}
12686
/* Probe NVRAM layout for 57780/57765-class devices.  Decodes the
 * vendor strapping bits in NVRAM_CFG1 into JEDEC vendor, flash vs.
 * EEPROM type and total NVRAM size; an unrecognized strap marks the
 * device as having no NVRAM at all.
 */
static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		/* Serial EEPROM: whole chip is one page; turn off
		 * compatibility bypass mode.
		 */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Size follows from the exact Atmel part strapped in. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Size follows from the exact ST part strapped in. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		/* Unrecognized strap: no usable NVRAM attached. */
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* NOTE(review): presumably only 264/528-byte-page DataFlash parts
	 * use the NVRAM address translation scheme — confirm against the
	 * flash access helpers before relying on this.
	 */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
12758
12759
/* Probe NVRAM layout for 5717/5719-class devices.  Decodes the vendor
 * strapping bits in NVRAM_CFG1 into JEDEC vendor, flash/EEPROM type
 * and, where the strap implies it, the total NVRAM size; an
 * unrecognized strap marks the device as having no NVRAM.
 */
static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		/* Serial EEPROM: whole chip is one page; turn off
		 * compatibility bypass mode.
		 */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		/* Unrecognized strap: no usable NVRAM attached. */
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* NOTE(review): presumably only 264/528-byte-page DataFlash parts
	 * use the NVRAM address translation scheme — confirm against the
	 * flash access helpers before relying on this.
	 */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
12837
/* Probe NVRAM layout for 5720-class devices.  The pin-strap field of
 * NVRAM_CFG1 selects between two EEPROM geometries and a range of
 * Atmel and ST flash parts; an unrecognized strap marks the device as
 * having no NVRAM.
 */
static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		/* Serial EEPROM: turn off compatibility bypass mode and
		 * pick the page size matching the strapped density.
		 */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Size follows from the exact Atmel part strapped in. */
		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Size follows from the exact ST part strapped in. */
		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		/* Unrecognized strap: no usable NVRAM attached. */
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* NOTE(review): presumably only 264/528-byte-page DataFlash parts
	 * use the NVRAM address translation scheme — confirm against the
	 * flash access helpers before relying on this.
	 */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
12949
12950 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12951 static void __devinit tg3_nvram_init(struct tg3 *tp)
12952 {
12953         tw32_f(GRC_EEPROM_ADDR,
12954              (EEPROM_ADDR_FSM_RESET |
12955               (EEPROM_DEFAULT_CLOCK_PERIOD <<
12956                EEPROM_ADDR_CLKPERD_SHIFT)));
12957
12958         msleep(1);
12959
12960         /* Enable seeprom accesses. */
12961         tw32_f(GRC_LOCAL_CTRL,
12962              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12963         udelay(100);
12964
12965         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12966             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12967                 tg3_flag_set(tp, NVRAM);
12968
12969                 if (tg3_nvram_lock(tp)) {
12970                         netdev_warn(tp->dev,
12971                                     "Cannot get nvram lock, %s failed\n",
12972                                     __func__);
12973                         return;
12974                 }
12975                 tg3_enable_nvram_access(tp);
12976
12977                 tp->nvram_size = 0;
12978
12979                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12980                         tg3_get_5752_nvram_info(tp);
12981                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12982                         tg3_get_5755_nvram_info(tp);
12983                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12984                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12985                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12986                         tg3_get_5787_nvram_info(tp);
12987                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12988                         tg3_get_5761_nvram_info(tp);
12989                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12990                         tg3_get_5906_nvram_info(tp);
12991                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12992                          tg3_flag(tp, 57765_CLASS))
12993                         tg3_get_57780_nvram_info(tp);
12994                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12995                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12996                         tg3_get_5717_nvram_info(tp);
12997                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12998                         tg3_get_5720_nvram_info(tp);
12999                 else
13000                         tg3_get_nvram_info(tp);
13001
13002                 if (tp->nvram_size == 0)
13003                         tg3_get_nvram_size(tp);
13004
13005                 tg3_disable_nvram_access(tp);
13006                 tg3_nvram_unlock(tp);
13007
13008         } else {
13009                 tg3_flag_clear(tp, NVRAM);
13010                 tg3_flag_clear(tp, NVRAM_BUFFERED);
13011
13012                 tg3_get_eeprom_size(tp);
13013         }
13014 }
13015
/* Maps one PCI subsystem vendor/device ID pair to the PHY ID found on
 * that board.  Some entries in the table below use a phy_id of 0;
 * presumably that means "no fixed PHY ID known for this board" — see
 * the callers of tg3_lookup_by_subsys() to confirm.
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};
13020
/* Known boards, keyed by PCI subsystem vendor/device ID.  Searched
 * linearly by tg3_lookup_by_subsys().
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
13084
13085 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13086 {
13087         int i;
13088
13089         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13090                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13091                      tp->pdev->subsystem_vendor) &&
13092                     (subsys_id_to_phy_id[i].subsys_devid ==
13093                      tp->pdev->subsystem_device))
13094                         return &subsys_id_to_phy_id[i];
13095         }
13096         return NULL;
13097 }
13098
13099 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13100 {
13101         u32 val;
13102
13103         tp->phy_id = TG3_PHY_ID_INVALID;
13104         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13105
13106         /* Assume an onboard device and WOL capable by default.  */
13107         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13108         tg3_flag_set(tp, WOL_CAP);
13109
13110         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13111                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13112                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13113                         tg3_flag_set(tp, IS_NIC);
13114                 }
13115                 val = tr32(VCPU_CFGSHDW);
13116                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
13117                         tg3_flag_set(tp, ASPM_WORKAROUND);
13118                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13119                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13120                         tg3_flag_set(tp, WOL_ENABLE);
13121                         device_set_wakeup_enable(&tp->pdev->dev, true);
13122                 }
13123                 goto done;
13124         }
13125
13126         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13127         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13128                 u32 nic_cfg, led_cfg;
13129                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13130                 int eeprom_phy_serdes = 0;
13131
13132                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13133                 tp->nic_sram_data_cfg = nic_cfg;
13134
13135                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13136                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13137                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13138                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13139                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13140                     (ver > 0) && (ver < 0x100))
13141                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13142
13143                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13144                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13145
13146                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13147                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13148                         eeprom_phy_serdes = 1;
13149
13150                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13151                 if (nic_phy_id != 0) {
13152                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13153                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13154
13155                         eeprom_phy_id  = (id1 >> 16) << 10;
13156                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
13157                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
13158                 } else
13159                         eeprom_phy_id = 0;
13160
13161                 tp->phy_id = eeprom_phy_id;
13162                 if (eeprom_phy_serdes) {
13163                         if (!tg3_flag(tp, 5705_PLUS))
13164                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13165                         else
13166                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13167                 }
13168
13169                 if (tg3_flag(tp, 5750_PLUS))
13170                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13171                                     SHASTA_EXT_LED_MODE_MASK);
13172                 else
13173                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13174
13175                 switch (led_cfg) {
13176                 default:
13177                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13178                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13179                         break;
13180
13181                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13182                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13183                         break;
13184
13185                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13186                         tp->led_ctrl = LED_CTRL_MODE_MAC;
13187
13188                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
13189                          * read on some older 5700/5701 bootcode.
13190                          */
13191                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13192                             ASIC_REV_5700 ||
13193                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
13194                             ASIC_REV_5701)
13195                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13196
13197                         break;
13198
13199                 case SHASTA_EXT_LED_SHARED:
13200                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
13201                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13202                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13203                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13204                                                  LED_CTRL_MODE_PHY_2);
13205                         break;
13206
13207                 case SHASTA_EXT_LED_MAC:
13208                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13209                         break;
13210
13211                 case SHASTA_EXT_LED_COMBO:
13212                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
13213                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13214                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13215                                                  LED_CTRL_MODE_PHY_2);
13216                         break;
13217
13218                 }
13219
13220                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13221                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13222                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13223                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13224
13225                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13226                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13227
13228                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13229                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13230                         if ((tp->pdev->subsystem_vendor ==
13231                              PCI_VENDOR_ID_ARIMA) &&
13232                             (tp->pdev->subsystem_device == 0x205a ||
13233                              tp->pdev->subsystem_device == 0x2063))
13234                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13235                 } else {
13236                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13237                         tg3_flag_set(tp, IS_NIC);
13238                 }
13239
13240                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13241                         tg3_flag_set(tp, ENABLE_ASF);
13242                         if (tg3_flag(tp, 5750_PLUS))
13243                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13244                 }
13245
13246                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13247                     tg3_flag(tp, 5750_PLUS))
13248                         tg3_flag_set(tp, ENABLE_APE);
13249
13250                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13251                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13252                         tg3_flag_clear(tp, WOL_CAP);
13253
13254                 if (tg3_flag(tp, WOL_CAP) &&
13255                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13256                         tg3_flag_set(tp, WOL_ENABLE);
13257                         device_set_wakeup_enable(&tp->pdev->dev, true);
13258                 }
13259
13260                 if (cfg2 & (1 << 17))
13261                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13262
13263                 /* serdes signal pre-emphasis in register 0x590 set by */
13264                 /* bootcode if bit 18 is set */
13265                 if (cfg2 & (1 << 18))
13266                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13267
13268                 if ((tg3_flag(tp, 57765_PLUS) ||
13269                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13270                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13271                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13272                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13273
13274                 if (tg3_flag(tp, PCI_EXPRESS) &&
13275                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13276                     !tg3_flag(tp, 57765_PLUS)) {
13277                         u32 cfg3;
13278
13279                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13280                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13281                                 tg3_flag_set(tp, ASPM_WORKAROUND);
13282                 }
13283
13284                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13285                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13286                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13287                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13288                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13289                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13290         }
13291 done:
13292         if (tg3_flag(tp, WOL_CAP))
13293                 device_set_wakeup_enable(&tp->pdev->dev,
13294                                          tg3_flag(tp, WOL_ENABLE));
13295         else
13296                 device_set_wakeup_capable(&tp->pdev->dev, false);
13297 }
13298
13299 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13300 {
13301         int i;
13302         u32 val;
13303
13304         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13305         tw32(OTP_CTRL, cmd);
13306
13307         /* Wait for up to 1 ms for command to execute. */
13308         for (i = 0; i < 100; i++) {
13309                 val = tr32(OTP_STATUS);
13310                 if (val & OTP_STATUS_CMD_DONE)
13311                         break;
13312                 udelay(10);
13313         }
13314
13315         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13316 }
13317
13318 /* Read the gphy configuration from the OTP region of the chip.  The gphy
13319  * configuration is a 32-bit value that straddles the alignment boundary.
13320  * We do two 32-bit reads and then shift and merge the results.
13321  */
13322 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13323 {
13324         u32 bhalf_otp, thalf_otp;
13325
13326         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13327
13328         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13329                 return 0;
13330
13331         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13332
13333         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13334                 return 0;
13335
13336         thalf_otp = tr32(OTP_READ_DATA);
13337
13338         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13339
13340         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13341                 return 0;
13342
13343         bhalf_otp = tr32(OTP_READ_DATA);
13344
13345         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13346 }
13347
13348 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13349 {
13350         u32 adv = ADVERTISED_Autoneg;
13351
13352         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13353                 adv |= ADVERTISED_1000baseT_Half |
13354                        ADVERTISED_1000baseT_Full;
13355
13356         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13357                 adv |= ADVERTISED_100baseT_Half |
13358                        ADVERTISED_100baseT_Full |
13359                        ADVERTISED_10baseT_Half |
13360                        ADVERTISED_10baseT_Full |
13361                        ADVERTISED_TP;
13362         else
13363                 adv |= ADVERTISED_FIBRE;
13364
13365         tp->link_config.advertising = adv;
13366         tp->link_config.speed = SPEED_INVALID;
13367         tp->link_config.duplex = DUPLEX_INVALID;
13368         tp->link_config.autoneg = AUTONEG_ENABLE;
13369         tp->link_config.active_speed = SPEED_INVALID;
13370         tp->link_config.active_duplex = DUPLEX_INVALID;
13371         tp->link_config.orig_speed = SPEED_INVALID;
13372         tp->link_config.orig_duplex = DUPLEX_INVALID;
13373         tp->link_config.orig_autoneg = AUTONEG_INVALID;
13374 }
13375
/* Identify the PHY attached to this NIC, record it in tp->phy_id /
 * tp->phy_flags, and bring copper PHYs into a sane autonegotiating
 * state.  Returns 0 on success or a negative errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	/* When phylib manages the PHY, delegate the whole probe to it. */
	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Pack the two MII ID words into the driver's internal
		 * TG3_PHY_ID layout (compare against TG3_PHY_ID_* below).
		 */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		/* Hardware-read ID is sane: trust it. */
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	/* Mark EEE-capable copper parts (5719/5720, post-A0 5718 and
	 * post-A0 57765 revisions).
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
	     (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

	tg3_phy_init_link_config(tp);

	/* Only reset/reconfigure copper PHYs that management firmware
	 * is not also driving.
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		/* BMSR link status latches low; read twice so the second
		 * read reflects the current state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		/* NOTE(review): the DSP init is deliberately issued a
		 * second time here — presumably the first pass can fail
		 * silently on 5401 parts; confirm against Broadcom docs
		 * before simplifying.
		 */
		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
13486
13487 static void __devinit tg3_read_vpd(struct tg3 *tp)
13488 {
13489         u8 *vpd_data;
13490         unsigned int block_end, rosize, len;
13491         u32 vpdlen;
13492         int j, i = 0;
13493
13494         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13495         if (!vpd_data)
13496                 goto out_no_vpd;
13497
13498         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13499         if (i < 0)
13500                 goto out_not_found;
13501
13502         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13503         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13504         i += PCI_VPD_LRDT_TAG_SIZE;
13505
13506         if (block_end > vpdlen)
13507                 goto out_not_found;
13508
13509         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13510                                       PCI_VPD_RO_KEYWORD_MFR_ID);
13511         if (j > 0) {
13512                 len = pci_vpd_info_field_size(&vpd_data[j]);
13513
13514                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13515                 if (j + len > block_end || len != 4 ||
13516                     memcmp(&vpd_data[j], "1028", 4))
13517                         goto partno;
13518
13519                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13520                                               PCI_VPD_RO_KEYWORD_VENDOR0);
13521                 if (j < 0)
13522                         goto partno;
13523
13524                 len = pci_vpd_info_field_size(&vpd_data[j]);
13525
13526                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13527                 if (j + len > block_end)
13528                         goto partno;
13529
13530                 memcpy(tp->fw_ver, &vpd_data[j], len);
13531                 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
13532         }
13533
13534 partno:
13535         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13536                                       PCI_VPD_RO_KEYWORD_PARTNO);
13537         if (i < 0)
13538                 goto out_not_found;
13539
13540         len = pci_vpd_info_field_size(&vpd_data[i]);
13541
13542         i += PCI_VPD_INFO_FLD_HDR_SIZE;
13543         if (len > TG3_BPN_SIZE ||
13544             (len + i) > vpdlen)
13545                 goto out_not_found;
13546
13547         memcpy(tp->board_part_number, &vpd_data[i], len);
13548
13549 out_not_found:
13550         kfree(vpd_data);
13551         if (tp->board_part_number[0])
13552                 return;
13553
13554 out_no_vpd:
13555         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13556                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13557                         strcpy(tp->board_part_number, "BCM5717");
13558                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13559                         strcpy(tp->board_part_number, "BCM5718");
13560                 else
13561                         goto nomatch;
13562         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13563                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13564                         strcpy(tp->board_part_number, "BCM57780");
13565                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13566                         strcpy(tp->board_part_number, "BCM57760");
13567                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13568                         strcpy(tp->board_part_number, "BCM57790");
13569                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13570                         strcpy(tp->board_part_number, "BCM57788");
13571                 else
13572                         goto nomatch;
13573         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13574                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13575                         strcpy(tp->board_part_number, "BCM57761");
13576                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13577                         strcpy(tp->board_part_number, "BCM57765");
13578                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13579                         strcpy(tp->board_part_number, "BCM57781");
13580                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13581                         strcpy(tp->board_part_number, "BCM57785");
13582                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13583                         strcpy(tp->board_part_number, "BCM57791");
13584                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13585                         strcpy(tp->board_part_number, "BCM57795");
13586                 else
13587                         goto nomatch;
13588         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
13589                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
13590                         strcpy(tp->board_part_number, "BCM57762");
13591                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
13592                         strcpy(tp->board_part_number, "BCM57766");
13593                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
13594                         strcpy(tp->board_part_number, "BCM57782");
13595                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13596                         strcpy(tp->board_part_number, "BCM57786");
13597                 else
13598                         goto nomatch;
13599         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13600                 strcpy(tp->board_part_number, "BCM95906");
13601         } else {
13602 nomatch:
13603                 strcpy(tp->board_part_number, "none");
13604         }
13605 }
13606
13607 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13608 {
13609         u32 val;
13610
13611         if (tg3_nvram_read(tp, offset, &val) ||
13612             (val & 0xfc000000) != 0x0c000000 ||
13613             tg3_nvram_read(tp, offset + 4, &val) ||
13614             val != 0)
13615                 return 0;
13616
13617         return 1;
13618 }
13619
/* Append the bootcode version to tp->fw_ver, handling both the newer
 * image format (embedded 16-byte version string) and the legacy packed
 * major/minor revision word.
 */
static void __devinit tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	/* NVRAM word 0xc points at the bootcode image; word 0x4 is the
	 * base used to relocate pointers found inside it.
	 */
	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	/* Newer images carry the same 0x0c-tag + zero-word signature
	 * that tg3_fw_img_is_valid() checks.
	 */
	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		/* The version is a 16-byte string inside the image; bail
		 * unless it fits in the remaining fw_ver space.
		 */
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		/* Legacy format: packed major/minor revision word. */
		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
13671
13672 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13673 {
13674         u32 val, major, minor;
13675
13676         /* Use native endian representation */
13677         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13678                 return;
13679
13680         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13681                 TG3_NVM_HWSB_CFG1_MAJSFT;
13682         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13683                 TG3_NVM_HWSB_CFG1_MINSFT;
13684
13685         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13686 }
13687
/* Decode the version stamped into a "self-boot" NVRAM image and append
 * it to tp->fw_ver.  @val is NVRAM word 0, already known to carry the
 * TG3_EEPROM_MAGIC_FW signature.
 */
static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	/* The edition (build/major/minor) word lives at a
	 * revision-specific offset.
	 */
	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		/* Unknown revision: leave fw_ver as just "sb". */
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;

	/* Sanity limits: minor is printed %02d, build maps to 'a'..'z'. */
	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	/* A non-zero build number is encoded as a trailing letter,
	 * build 1 -> 'a'.
	 */
	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
13742
/* Locate the ASF management firmware image via the NVRAM directory and
 * append its version string (", xxxx") to tp->fw_ver.
 */
static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	/* Scan the fixed-size NVRAM directory for an ASF-init entry. */
	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	/* Pre-5705 parts use a fixed load address; later parts store it
	 * in the preceding directory word.
	 */
	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	/* Relocate the in-image version pointer to an NVRAM offset. */
	offset += val - start;

	vlen = strlen(tp->fw_ver);

	/* NOTE(review): no bounds check before these two writes; the
	 * callers appear to keep fw_ver short enough, but confirm vlen
	 * can never reach TG3_VER_SIZE - 2 here.
	 */
	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	/* Copy up to 16 version bytes, truncating at the buffer end. */
	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
13794
13795 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13796 {
13797         int vlen;
13798         u32 apedata;
13799         char *fwtype;
13800
13801         if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13802                 return;
13803
13804         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13805         if (apedata != APE_SEG_SIG_MAGIC)
13806                 return;
13807
13808         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13809         if (!(apedata & APE_FW_STATUS_READY))
13810                 return;
13811
13812         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13813
13814         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13815                 tg3_flag_set(tp, APE_HAS_NCSI);
13816                 fwtype = "NCSI";
13817         } else {
13818                 fwtype = "DASH";
13819         }
13820
13821         vlen = strlen(tp->fw_ver);
13822
13823         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13824                  fwtype,
13825                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13826                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13827                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13828                  (apedata & APE_FW_VERSION_BLDMSK));
13829 }
13830
13831 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13832 {
13833         u32 val;
13834         bool vpd_vers = false;
13835
13836         if (tp->fw_ver[0] != 0)
13837                 vpd_vers = true;
13838
13839         if (tg3_flag(tp, NO_NVRAM)) {
13840                 strcat(tp->fw_ver, "sb");
13841                 return;
13842         }
13843
13844         if (tg3_nvram_read(tp, 0, &val))
13845                 return;
13846
13847         if (val == TG3_EEPROM_MAGIC)
13848                 tg3_read_bc_ver(tp);
13849         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13850                 tg3_read_sb_ver(tp, val);
13851         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13852                 tg3_read_hwsb_ver(tp);
13853         else
13854                 return;
13855
13856         if (vpd_vers)
13857                 goto done;
13858
13859         if (tg3_flag(tp, ENABLE_APE)) {
13860                 if (tg3_flag(tp, ENABLE_ASF))
13861                         tg3_read_dash_ver(tp);
13862         } else if (tg3_flag(tp, ENABLE_ASF)) {
13863                 tg3_read_mgmtfw_ver(tp);
13864         }
13865
13866 done:
13867         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13868 }
13869
13870 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13871 {
13872         if (tg3_flag(tp, LRG_PROD_RING_CAP))
13873                 return TG3_RX_RET_MAX_SIZE_5717;
13874         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13875                 return TG3_RX_RET_MAX_SIZE_5700;
13876         else
13877                 return TG3_RX_RET_MAX_SIZE_5705;
13878 }
13879
/* Host bridges known to reorder posted PCI writes.
 * NOTE(review): presumably matched against the system's bridges during
 * tg3_get_invariants() to enable a write-reorder workaround — confirm
 * where this table is consumed before extending it.
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
13886
/* Find the sibling PCI function of a dual-port device (e.g. 5704).
 * Returns tp->pdev itself when no peer function exists.  The returned
 * pointer's refcount is deliberately NOT held (see comment below).
 */
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	/* Probe every function in this slot for a device that is not
	 * ourselves.
	 */
	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);	/* pci_dev_put(NULL) is a no-op */
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
13914
13915 static int __devinit tg3_get_invariants(struct tg3 *tp)
13916 {
13917         u32 misc_ctrl_reg;
13918         u32 pci_state_reg, grc_misc_cfg;
13919         u32 val;
13920         u16 pci_cmd;
13921         int err;
13922
13923         /* Force memory write invalidate off.  If we leave it on,
13924          * then on 5700_BX chips we have to enable a workaround.
13925          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13926          * to match the cacheline size.  The Broadcom driver have this
13927          * workaround but turns MWI off all the times so never uses
13928          * it.  This seems to suggest that the workaround is insufficient.
13929          */
13930         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13931         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13932         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13933
13934         /* Important! -- Make sure register accesses are byteswapped
13935          * correctly.  Also, for those chips that require it, make
13936          * sure that indirect register accesses are enabled before
13937          * the first operation.
13938          */
13939         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13940                               &misc_ctrl_reg);
13941         tp->misc_host_ctrl |= (misc_ctrl_reg &
13942                                MISC_HOST_CTRL_CHIPREV);
13943         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13944                                tp->misc_host_ctrl);
13945
13946         tp->pci_chip_rev_id = (misc_ctrl_reg >>
13947                                MISC_HOST_CTRL_CHIPREV_SHIFT);
13948         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13949                 u32 prod_id_asic_rev;
13950
13951                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13952                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13953                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13954                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13955                         pci_read_config_dword(tp->pdev,
13956                                               TG3PCI_GEN2_PRODID_ASICREV,
13957                                               &prod_id_asic_rev);
13958                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13959                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13960                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13961                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13962                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13963                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
13964                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
13965                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
13966                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
13967                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13968                         pci_read_config_dword(tp->pdev,
13969                                               TG3PCI_GEN15_PRODID_ASICREV,
13970                                               &prod_id_asic_rev);
13971                 else
13972                         pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13973                                               &prod_id_asic_rev);
13974
13975                 tp->pci_chip_rev_id = prod_id_asic_rev;
13976         }
13977
13978         /* Wrong chip ID in 5752 A0. This code can be removed later
13979          * as A0 is not in production.
13980          */
13981         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13982                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13983
13984         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13985          * we need to disable memory and use config. cycles
13986          * only to access all registers. The 5702/03 chips
13987          * can mistakenly decode the special cycles from the
13988          * ICH chipsets as memory write cycles, causing corruption
13989          * of register and memory space. Only certain ICH bridges
13990          * will drive special cycles with non-zero data during the
13991          * address phase which can fall within the 5703's address
13992          * range. This is not an ICH bug as the PCI spec allows
13993          * non-zero address during special cycles. However, only
13994          * these ICH bridges are known to drive non-zero addresses
13995          * during special cycles.
13996          *
13997          * Since special cycles do not cross PCI bridges, we only
13998          * enable this workaround if the 5703 is on the secondary
13999          * bus of these ICH bridges.
14000          */
14001         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
14002             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
14003                 static struct tg3_dev_id {
14004                         u32     vendor;
14005                         u32     device;
14006                         u32     rev;
14007                 } ich_chipsets[] = {
14008                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
14009                           PCI_ANY_ID },
14010                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
14011                           PCI_ANY_ID },
14012                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
14013                           0xa },
14014                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
14015                           PCI_ANY_ID },
14016                         { },
14017                 };
14018                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
14019                 struct pci_dev *bridge = NULL;
14020
14021                 while (pci_id->vendor != 0) {
14022                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
14023                                                 bridge);
14024                         if (!bridge) {
14025                                 pci_id++;
14026                                 continue;
14027                         }
14028                         if (pci_id->rev != PCI_ANY_ID) {
14029                                 if (bridge->revision > pci_id->rev)
14030                                         continue;
14031                         }
14032                         if (bridge->subordinate &&
14033                             (bridge->subordinate->number ==
14034                              tp->pdev->bus->number)) {
14035                                 tg3_flag_set(tp, ICH_WORKAROUND);
14036                                 pci_dev_put(bridge);
14037                                 break;
14038                         }
14039                 }
14040         }
14041
14042         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14043                 static struct tg3_dev_id {
14044                         u32     vendor;
14045                         u32     device;
14046                 } bridge_chipsets[] = {
14047                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
14048                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
14049                         { },
14050                 };
14051                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14052                 struct pci_dev *bridge = NULL;
14053
14054                 while (pci_id->vendor != 0) {
14055                         bridge = pci_get_device(pci_id->vendor,
14056                                                 pci_id->device,
14057                                                 bridge);
14058                         if (!bridge) {
14059                                 pci_id++;
14060                                 continue;
14061                         }
14062                         if (bridge->subordinate &&
14063                             (bridge->subordinate->number <=
14064                              tp->pdev->bus->number) &&
14065                             (bridge->subordinate->subordinate >=
14066                              tp->pdev->bus->number)) {
14067                                 tg3_flag_set(tp, 5701_DMA_BUG);
14068                                 pci_dev_put(bridge);
14069                                 break;
14070                         }
14071                 }
14072         }
14073
14074         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
14075          * DMA addresses > 40-bit. This bridge may have other additional
14076          * 57xx devices behind it in some 4-port NIC designs for example.
14077          * Any tg3 device found behind the bridge will also need the 40-bit
14078          * DMA workaround.
14079          */
14080         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14081             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14082                 tg3_flag_set(tp, 5780_CLASS);
14083                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14084                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
14085         } else {
14086                 struct pci_dev *bridge = NULL;
14087
14088                 do {
14089                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14090                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
14091                                                 bridge);
14092                         if (bridge && bridge->subordinate &&
14093                             (bridge->subordinate->number <=
14094                              tp->pdev->bus->number) &&
14095                             (bridge->subordinate->subordinate >=
14096                              tp->pdev->bus->number)) {
14097                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14098                                 pci_dev_put(bridge);
14099                                 break;
14100                         }
14101                 } while (bridge);
14102         }
14103
14104         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14105             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14106                 tp->pdev_peer = tg3_find_peer(tp);
14107
14108         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14109             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14110             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14111                 tg3_flag_set(tp, 5717_PLUS);
14112
14113         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
14114             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14115                 tg3_flag_set(tp, 57765_CLASS);
14116
14117         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
14118                 tg3_flag_set(tp, 57765_PLUS);
14119
14120         /* Intentionally exclude ASIC_REV_5906 */
14121         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14122             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14123             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14124             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14125             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14126             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14127             tg3_flag(tp, 57765_PLUS))
14128                 tg3_flag_set(tp, 5755_PLUS);
14129
14130         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14131             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14132             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14133             tg3_flag(tp, 5755_PLUS) ||
14134             tg3_flag(tp, 5780_CLASS))
14135                 tg3_flag_set(tp, 5750_PLUS);
14136
14137         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14138             tg3_flag(tp, 5750_PLUS))
14139                 tg3_flag_set(tp, 5705_PLUS);
14140
14141         /* Determine TSO capabilities */
14142         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
14143                 ; /* Do nothing. HW bug. */
14144         else if (tg3_flag(tp, 57765_PLUS))
14145                 tg3_flag_set(tp, HW_TSO_3);
14146         else if (tg3_flag(tp, 5755_PLUS) ||
14147                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14148                 tg3_flag_set(tp, HW_TSO_2);
14149         else if (tg3_flag(tp, 5750_PLUS)) {
14150                 tg3_flag_set(tp, HW_TSO_1);
14151                 tg3_flag_set(tp, TSO_BUG);
14152                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14153                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
14154                         tg3_flag_clear(tp, TSO_BUG);
14155         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14156                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14157                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
14158                         tg3_flag_set(tp, TSO_BUG);
14159                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14160                         tp->fw_needed = FIRMWARE_TG3TSO5;
14161                 else
14162                         tp->fw_needed = FIRMWARE_TG3TSO;
14163         }
14164
14165         /* Selectively allow TSO based on operating conditions */
14166         if (tg3_flag(tp, HW_TSO_1) ||
14167             tg3_flag(tp, HW_TSO_2) ||
14168             tg3_flag(tp, HW_TSO_3) ||
14169             tp->fw_needed) {
14170                 /* For firmware TSO, assume ASF is disabled.
14171                  * We'll disable TSO later if we discover ASF
14172                  * is enabled in tg3_get_eeprom_hw_cfg().
14173                  */
14174                 tg3_flag_set(tp, TSO_CAPABLE);
14175         } else {
14176                 tg3_flag_clear(tp, TSO_CAPABLE);
14177                 tg3_flag_clear(tp, TSO_BUG);
14178                 tp->fw_needed = NULL;
14179         }
14180
14181         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14182                 tp->fw_needed = FIRMWARE_TG3;
14183
14184         tp->irq_max = 1;
14185
14186         if (tg3_flag(tp, 5750_PLUS)) {
14187                 tg3_flag_set(tp, SUPPORT_MSI);
14188                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14189                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14190                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14191                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14192                      tp->pdev_peer == tp->pdev))
14193                         tg3_flag_clear(tp, SUPPORT_MSI);
14194
14195                 if (tg3_flag(tp, 5755_PLUS) ||
14196                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14197                         tg3_flag_set(tp, 1SHOT_MSI);
14198                 }
14199
14200                 if (tg3_flag(tp, 57765_PLUS)) {
14201                         tg3_flag_set(tp, SUPPORT_MSIX);
14202                         tp->irq_max = TG3_IRQ_MAX_VECS;
14203                         tg3_rss_init_dflt_indir_tbl(tp);
14204                 }
14205         }
14206
14207         if (tg3_flag(tp, 5755_PLUS))
14208                 tg3_flag_set(tp, SHORT_DMA_BUG);
14209
14210         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14211                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
14212
14213         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14214             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14215             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14216                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14217
14218         if (tg3_flag(tp, 57765_PLUS) &&
14219             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14220                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14221
14222         if (!tg3_flag(tp, 5705_PLUS) ||
14223             tg3_flag(tp, 5780_CLASS) ||
14224             tg3_flag(tp, USE_JUMBO_BDFLAG))
14225                 tg3_flag_set(tp, JUMBO_CAPABLE);
14226
14227         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14228                               &pci_state_reg);
14229
14230         if (pci_is_pcie(tp->pdev)) {
14231                 u16 lnkctl;
14232
14233                 tg3_flag_set(tp, PCI_EXPRESS);
14234
14235                 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
14236                         int readrq = pcie_get_readrq(tp->pdev);
14237                         if (readrq > 2048)
14238                                 pcie_set_readrq(tp->pdev, 2048);
14239                 }
14240
14241                 pci_read_config_word(tp->pdev,
14242                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14243                                      &lnkctl);
14244                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14245                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14246                             ASIC_REV_5906) {
14247                                 tg3_flag_clear(tp, HW_TSO_2);
14248                                 tg3_flag_clear(tp, TSO_CAPABLE);
14249                         }
14250                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14251                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14252                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14253                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14254                                 tg3_flag_set(tp, CLKREQ_BUG);
14255                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14256                         tg3_flag_set(tp, L1PLLPD_EN);
14257                 }
14258         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14259                 /* BCM5785 devices are effectively PCIe devices, and should
14260                  * follow PCIe codepaths, but do not have a PCIe capabilities
14261                  * section.
14262                  */
14263                 tg3_flag_set(tp, PCI_EXPRESS);
14264         } else if (!tg3_flag(tp, 5705_PLUS) ||
14265                    tg3_flag(tp, 5780_CLASS)) {
14266                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14267                 if (!tp->pcix_cap) {
14268                         dev_err(&tp->pdev->dev,
14269                                 "Cannot find PCI-X capability, aborting\n");
14270                         return -EIO;
14271                 }
14272
14273                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14274                         tg3_flag_set(tp, PCIX_MODE);
14275         }
14276
14277         /* If we have an AMD 762 or VIA K8T800 chipset, write
14278          * reordering to the mailbox registers done by the host
14279          * controller can cause major troubles.  We read back from
14280          * every mailbox register write to force the writes to be
14281          * posted to the chip in order.
14282          */
14283         if (pci_dev_present(tg3_write_reorder_chipsets) &&
14284             !tg3_flag(tp, PCI_EXPRESS))
14285                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14286
14287         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14288                              &tp->pci_cacheline_sz);
14289         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14290                              &tp->pci_lat_timer);
14291         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14292             tp->pci_lat_timer < 64) {
14293                 tp->pci_lat_timer = 64;
14294                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14295                                       tp->pci_lat_timer);
14296         }
14297
14298         /* Important! -- It is critical that the PCI-X hw workaround
14299          * situation is decided before the first MMIO register access.
14300          */
14301         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14302                 /* 5700 BX chips need to have their TX producer index
14303                  * mailboxes written twice to workaround a bug.
14304                  */
14305                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14306
14307                 /* If we are in PCI-X mode, enable register write workaround.
14308                  *
14309                  * The workaround is to use indirect register accesses
14310                  * for all chip writes not to mailbox registers.
14311                  */
14312                 if (tg3_flag(tp, PCIX_MODE)) {
14313                         u32 pm_reg;
14314
14315                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14316
14317                         /* The chip can have its power management PCI config
14318                          * space registers clobbered due to this bug.
14319                          * So explicitly force the chip into D0 here.
14320                          */
14321                         pci_read_config_dword(tp->pdev,
14322                                               tp->pm_cap + PCI_PM_CTRL,
14323                                               &pm_reg);
14324                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14325                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14326                         pci_write_config_dword(tp->pdev,
14327                                                tp->pm_cap + PCI_PM_CTRL,
14328                                                pm_reg);
14329
14330                         /* Also, force SERR#/PERR# in PCI command. */
14331                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14332                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14333                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14334                 }
14335         }
14336
14337         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14338                 tg3_flag_set(tp, PCI_HIGH_SPEED);
14339         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14340                 tg3_flag_set(tp, PCI_32BIT);
14341
14342         /* Chip-specific fixup from Broadcom driver */
14343         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14344             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14345                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14346                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14347         }
14348
14349         /* Default fast path register access methods */
14350         tp->read32 = tg3_read32;
14351         tp->write32 = tg3_write32;
14352         tp->read32_mbox = tg3_read32;
14353         tp->write32_mbox = tg3_write32;
14354         tp->write32_tx_mbox = tg3_write32;
14355         tp->write32_rx_mbox = tg3_write32;
14356
14357         /* Various workaround register access methods */
14358         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14359                 tp->write32 = tg3_write_indirect_reg32;
14360         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14361                  (tg3_flag(tp, PCI_EXPRESS) &&
14362                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14363                 /*
14364                  * Back to back register writes can cause problems on these
14365                  * chips, the workaround is to read back all reg writes
14366                  * except those to mailbox regs.
14367                  *
14368                  * See tg3_write_indirect_reg32().
14369                  */
14370                 tp->write32 = tg3_write_flush_reg32;
14371         }
14372
14373         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14374                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14375                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14376                         tp->write32_rx_mbox = tg3_write_flush_reg32;
14377         }
14378
14379         if (tg3_flag(tp, ICH_WORKAROUND)) {
14380                 tp->read32 = tg3_read_indirect_reg32;
14381                 tp->write32 = tg3_write_indirect_reg32;
14382                 tp->read32_mbox = tg3_read_indirect_mbox;
14383                 tp->write32_mbox = tg3_write_indirect_mbox;
14384                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14385                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14386
14387                 iounmap(tp->regs);
14388                 tp->regs = NULL;
14389
14390                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14391                 pci_cmd &= ~PCI_COMMAND_MEMORY;
14392                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14393         }
14394         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14395                 tp->read32_mbox = tg3_read32_mbox_5906;
14396                 tp->write32_mbox = tg3_write32_mbox_5906;
14397                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14398                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14399         }
14400
14401         if (tp->write32 == tg3_write_indirect_reg32 ||
14402             (tg3_flag(tp, PCIX_MODE) &&
14403              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14404               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14405                 tg3_flag_set(tp, SRAM_USE_CONFIG);
14406
14407         /* The memory arbiter has to be enabled in order for SRAM accesses
14408          * to succeed.  Normally on powerup the tg3 chip firmware will make
14409          * sure it is enabled, but other entities such as system netboot
14410          * code might disable it.
14411          */
14412         val = tr32(MEMARB_MODE);
14413         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14414
14415         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14416         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14417             tg3_flag(tp, 5780_CLASS)) {
14418                 if (tg3_flag(tp, PCIX_MODE)) {
14419                         pci_read_config_dword(tp->pdev,
14420                                               tp->pcix_cap + PCI_X_STATUS,
14421                                               &val);
14422                         tp->pci_fn = val & 0x7;
14423                 }
14424         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14425                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14426                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14427                     NIC_SRAM_CPMUSTAT_SIG) {
14428                         tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14429                         tp->pci_fn = tp->pci_fn ? 1 : 0;
14430                 }
14431         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14432                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14433                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14434                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14435                     NIC_SRAM_CPMUSTAT_SIG) {
14436                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14437                                      TG3_CPMU_STATUS_FSHFT_5719;
14438                 }
14439         }
14440
14441         /* Get eeprom hw config before calling tg3_set_power_state().
14442          * In particular, the TG3_FLAG_IS_NIC flag must be
14443          * determined before calling tg3_set_power_state() so that
14444          * we know whether or not to switch out of Vaux power.
14445          * When the flag is set, it means that GPIO1 is used for eeprom
14446          * write protect and also implies that it is a LOM where GPIOs
14447          * are not used to switch power.
14448          */
14449         tg3_get_eeprom_hw_cfg(tp);
14450
14451         if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14452                 tg3_flag_clear(tp, TSO_CAPABLE);
14453                 tg3_flag_clear(tp, TSO_BUG);
14454                 tp->fw_needed = NULL;
14455         }
14456
14457         if (tg3_flag(tp, ENABLE_APE)) {
14458                 /* Allow reads and writes to the
14459                  * APE register and memory space.
14460                  */
14461                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14462                                  PCISTATE_ALLOW_APE_SHMEM_WR |
14463                                  PCISTATE_ALLOW_APE_PSPACE_WR;
14464                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14465                                        pci_state_reg);
14466
14467                 tg3_ape_lock_init(tp);
14468         }
14469
14470         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14471             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14472             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14473             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14474             tg3_flag(tp, 57765_PLUS))
14475                 tg3_flag_set(tp, CPMU_PRESENT);
14476
14477         /* Set up tp->grc_local_ctrl before calling
14478          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14479          * will bring 5700's external PHY out of reset.
14480          * It is also used as eeprom write protect on LOMs.
14481          */
14482         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14483         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14484             tg3_flag(tp, EEPROM_WRITE_PROT))
14485                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14486                                        GRC_LCLCTRL_GPIO_OUTPUT1);
14487         /* Unused GPIO3 must be driven as output on 5752 because there
14488          * are no pull-up resistors on unused GPIO pins.
14489          */
14490         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14491                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14492
14493         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14494             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14495             tg3_flag(tp, 57765_CLASS))
14496                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14497
14498         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14499             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14500                 /* Turn off the debug UART. */
14501                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14502                 if (tg3_flag(tp, IS_NIC))
14503                         /* Keep VMain power. */
14504                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14505                                               GRC_LCLCTRL_GPIO_OUTPUT0;
14506         }
14507
14508         /* Switch out of Vaux if it is a NIC */
14509         tg3_pwrsrc_switch_to_vmain(tp);
14510
14511         /* Derive initial jumbo mode from MTU assigned in
14512          * ether_setup() via the alloc_etherdev() call
14513          */
14514         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14515                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14516
14517         /* Determine WakeOnLan speed to use. */
14518         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14519             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14520             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14521             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14522                 tg3_flag_clear(tp, WOL_SPEED_100MB);
14523         } else {
14524                 tg3_flag_set(tp, WOL_SPEED_100MB);
14525         }
14526
14527         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14528                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14529
14530         /* A few boards don't want Ethernet@WireSpeed phy feature */
14531         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14532             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14533              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14534              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14535             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14536             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14537                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14538
14539         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14540             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14541                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14542         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14543                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14544
14545         if (tg3_flag(tp, 5705_PLUS) &&
14546             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14547             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14548             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14549             !tg3_flag(tp, 57765_PLUS)) {
14550                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14551                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14552                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14553                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14554                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14555                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14556                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14557                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14558                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14559                 } else
14560                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14561         }
14562
14563         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14564             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14565                 tp->phy_otp = tg3_read_otp_phycfg(tp);
14566                 if (tp->phy_otp == 0)
14567                         tp->phy_otp = TG3_OTP_DEFAULT;
14568         }
14569
14570         if (tg3_flag(tp, CPMU_PRESENT))
14571                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14572         else
14573                 tp->mi_mode = MAC_MI_MODE_BASE;
14574
14575         tp->coalesce_mode = 0;
14576         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14577             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14578                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14579
14580         /* Set these bits to enable statistics workaround. */
14581         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14582             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14583             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14584                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14585                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14586         }
14587
14588         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14589             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14590                 tg3_flag_set(tp, USE_PHYLIB);
14591
14592         err = tg3_mdio_init(tp);
14593         if (err)
14594                 return err;
14595
14596         /* Initialize data/descriptor byte/word swapping. */
14597         val = tr32(GRC_MODE);
14598         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14599                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14600                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
14601                         GRC_MODE_B2HRX_ENABLE |
14602                         GRC_MODE_HTX2B_ENABLE |
14603                         GRC_MODE_HOST_STACKUP);
14604         else
14605                 val &= GRC_MODE_HOST_STACKUP;
14606
14607         tw32(GRC_MODE, val | tp->grc_mode);
14608
14609         tg3_switch_clocks(tp);
14610
14611         /* Clear this out for sanity. */
14612         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14613
14614         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14615                               &pci_state_reg);
14616         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14617             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14618                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14619
14620                 if (chiprevid == CHIPREV_ID_5701_A0 ||
14621                     chiprevid == CHIPREV_ID_5701_B0 ||
14622                     chiprevid == CHIPREV_ID_5701_B2 ||
14623                     chiprevid == CHIPREV_ID_5701_B5) {
14624                         void __iomem *sram_base;
14625
14626                         /* Write some dummy words into the SRAM status block
14627                          * area, see if it reads back correctly.  If the return
14628                          * value is bad, force enable the PCIX workaround.
14629                          */
14630                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14631
14632                         writel(0x00000000, sram_base);
14633                         writel(0x00000000, sram_base + 4);
14634                         writel(0xffffffff, sram_base + 4);
14635                         if (readl(sram_base) != 0x00000000)
14636                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14637                 }
14638         }
14639
14640         udelay(50);
14641         tg3_nvram_init(tp);
14642
14643         grc_misc_cfg = tr32(GRC_MISC_CFG);
14644         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14645
14646         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14647             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14648              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14649                 tg3_flag_set(tp, IS_5788);
14650
14651         if (!tg3_flag(tp, IS_5788) &&
14652             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14653                 tg3_flag_set(tp, TAGGED_STATUS);
14654         if (tg3_flag(tp, TAGGED_STATUS)) {
14655                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14656                                       HOSTCC_MODE_CLRTICK_TXBD);
14657
14658                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14659                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14660                                        tp->misc_host_ctrl);
14661         }
14662
14663         /* Preserve the APE MAC_MODE bits */
14664         if (tg3_flag(tp, ENABLE_APE))
14665                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14666         else
14667                 tp->mac_mode = 0;
14668
14669         /* these are limited to 10/100 only */
14670         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14671              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14672             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14673              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14674              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14675               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14676               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14677             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14678              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14679               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14680               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14681             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14682             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14683             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14684             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14685                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14686
14687         err = tg3_phy_probe(tp);
14688         if (err) {
14689                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14690                 /* ... but do not return immediately ... */
14691                 tg3_mdio_fini(tp);
14692         }
14693
14694         tg3_read_vpd(tp);
14695         tg3_read_fw_ver(tp);
14696
14697         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14698                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14699         } else {
14700                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14701                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14702                 else
14703                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14704         }
14705
14706         /* 5700 {AX,BX} chips have a broken status block link
14707          * change bit implementation, so we must use the
14708          * status register in those cases.
14709          */
14710         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14711                 tg3_flag_set(tp, USE_LINKCHG_REG);
14712         else
14713                 tg3_flag_clear(tp, USE_LINKCHG_REG);
14714
14715         /* The led_ctrl is set during tg3_phy_probe, here we might
14716          * have to force the link status polling mechanism based
14717          * upon subsystem IDs.
14718          */
14719         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14720             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14721             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14722                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14723                 tg3_flag_set(tp, USE_LINKCHG_REG);
14724         }
14725
14726         /* For all SERDES we poll the MAC status register. */
14727         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14728                 tg3_flag_set(tp, POLL_SERDES);
14729         else
14730                 tg3_flag_clear(tp, POLL_SERDES);
14731
14732         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
14733         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14734         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14735             tg3_flag(tp, PCIX_MODE)) {
14736                 tp->rx_offset = NET_SKB_PAD;
14737 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14738                 tp->rx_copy_thresh = ~(u16)0;
14739 #endif
14740         }
14741
14742         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14743         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14744         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14745
14746         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14747
14748         /* Increment the rx prod index on the rx std ring by at most
14749          * 8 for these chips to workaround hw errata.
14750          */
14751         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14752             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14753             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14754                 tp->rx_std_max_post = 8;
14755
14756         if (tg3_flag(tp, ASPM_WORKAROUND))
14757                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14758                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
14759
14760         return err;
14761 }
14762
14763 #ifdef CONFIG_SPARC
14764 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14765 {
14766         struct net_device *dev = tp->dev;
14767         struct pci_dev *pdev = tp->pdev;
14768         struct device_node *dp = pci_device_to_OF_node(pdev);
14769         const unsigned char *addr;
14770         int len;
14771
14772         addr = of_get_property(dp, "local-mac-address", &len);
14773         if (addr && len == 6) {
14774                 memcpy(dev->dev_addr, addr, 6);
14775                 memcpy(dev->perm_addr, dev->dev_addr, 6);
14776                 return 0;
14777         }
14778         return -ENODEV;
14779 }
14780
14781 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14782 {
14783         struct net_device *dev = tp->dev;
14784
14785         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14786         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14787         return 0;
14788 }
14789 #endif
14790
/* Determine the device MAC address, trying sources in order of trust:
 * SPARC OF property, SRAM MAC mailbox, NVRAM, then the MAC address
 * registers, with a SPARC IDPROM fallback.  Fills dev->dev_addr and
 * dev->perm_addr; returns 0 on success, -EINVAL if no valid address
 * could be found.
 */
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	/* Firmware-provided per-device address wins outright. */
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	/* Pick the NVRAM offset of the MAC address for this chip/function. */
	mac_offset = 0x7c;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		/* Dual-MAC parts: the second MAC's address lives at 0xcc. */
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		/* Reset the NVRAM state machine if we could not take the
		 * arbitration lock; otherwise drop the lock we just got.
		 */
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		/* 5717+ multi-function devices: offset depends on PCI fn. */
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	/* 0x484b is ASCII "HK" -- presumably the mailbox-valid signature
	 * written by bootcode (NOTE(review): confirm against firmware docs).
	 */
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			/* hi holds bytes 0-1 in its low half (big-endian
			 * read), lo holds bytes 2-5.
			 */
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		/* Absolute fallback: system-wide IDPROM address. */
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
14866
14867 #define BOUNDARY_SINGLE_CACHELINE       1
14868 #define BOUNDARY_MULTI_CACHELINE        2
14869
/* Compute the DMA read/write boundary bits to merge into the
 * DMA_RWCTRL value @val, based on the PCI cache line size and the bus
 * type (PCI / PCI-X / PCIe).  Returns the updated value; @val is
 * returned unchanged when boundary bits have no effect on this chip.
 */
static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	/* PCI_CACHE_LINE_SIZE config byte is in units of 4 bytes;
	 * 0 means "not set", treated here as 1024.
	 */
	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

	/* Pick a per-architecture bursting policy; 0 means "no limit". */
#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (tg3_flag(tp, 57765_PLUS)) {
		/* 57765+ use a single disable bit instead of boundary
		 * fields: set it only when no boundary goal applies.
		 */
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		/* PCI-X encodings: only 128, 256 and 384-byte choices. */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		/* PCIe: write-side control only, 64 or 128-byte boundary. */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		/* Conventional PCI: match the boundary to the cache line
		 * size when a single-cacheline goal is requested; the
		 * fallthrough chain walks up to the next supported size.
		 */
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
15010
/* Run one DMA transfer of @size bytes between the host buffer
 * (@buf/@buf_dma) and NIC-internal memory, by hand-building an
 * internal buffer descriptor in SRAM and kicking the read
 * (@to_device != 0) or write DMA engine.  Returns 0 when the
 * completion FIFO reports the descriptor, -ENODEV on timeout
 * (40 polls x 100us = 4ms).
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce: clear completion FIFOs, DMA status, buffer manager
	 * and flow-through queues before the test.
	 */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		/* Queue/completion IDs for the read-DMA path; enable the
		 * read DMA engine.
		 */
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		/* Queue/completion IDs for the write-DMA path; enable the
		 * write DMA engine.
		 */
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor word-by-word into NIC SRAM through the
	 * PCI memory window registers.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Enqueue the descriptor address to start the transfer. */
	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	/* Poll the matching completion FIFO for our descriptor. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
15090
#define TEST_BUFFER_SIZE        0x2000

/* Chipsets known to expose the 5700/5701 write-DMA bug even though
 * they pass the DMA test; tg3_test_dma() forces the 16-byte write
 * boundary when one of these is present.
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};
15097
/* Calibrate tp->dma_rwctrl (the TG3PCI_DMA_RW_CTRL value) for the host
 * bus type, then on 5700/5701 run a write/read DMA loopback against a
 * coherent test buffer to detect the write-DMA bug, tightening the
 * write boundary to 16 bytes if corruption is observed.  Returns 0 on
 * success or a negative errno.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Base PCI read/write command codes for DMA_RWCTRL. */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	/* 57765+ need none of the per-bus watermark tuning below. */
	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		/* Conventional PCI: watermarks by ASIC generation. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	/* 5703/5704: clear the low command nibble. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants.  */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 need the write-DMA bug loopback test below. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	/* Loop: write pattern to the chip, read it back, verify.  On
	 * mismatch, retry once with the 16-byte write boundary; a
	 * mismatch even then is a hard failure.
	 */
	while (1) {
		u32 *p = buf, i;

		/* Fill the buffer with a known ascending pattern. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				/* First corruption: tighten to the 16-byte
				 * write boundary and retry the whole test.
				 */
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
15287
15288 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15289 {
15290         if (tg3_flag(tp, 57765_PLUS)) {
15291                 tp->bufmgr_config.mbuf_read_dma_low_water =
15292                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15293                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15294                         DEFAULT_MB_MACRX_LOW_WATER_57765;
15295                 tp->bufmgr_config.mbuf_high_water =
15296                         DEFAULT_MB_HIGH_WATER_57765;
15297
15298                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15299                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15300                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15301                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15302                 tp->bufmgr_config.mbuf_high_water_jumbo =
15303                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15304         } else if (tg3_flag(tp, 5705_PLUS)) {
15305                 tp->bufmgr_config.mbuf_read_dma_low_water =
15306                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15307                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15308                         DEFAULT_MB_MACRX_LOW_WATER_5705;
15309                 tp->bufmgr_config.mbuf_high_water =
15310                         DEFAULT_MB_HIGH_WATER_5705;
15311                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15312                         tp->bufmgr_config.mbuf_mac_rx_low_water =
15313                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
15314                         tp->bufmgr_config.mbuf_high_water =
15315                                 DEFAULT_MB_HIGH_WATER_5906;
15316                 }
15317
15318                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15319                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15320                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15321                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15322                 tp->bufmgr_config.mbuf_high_water_jumbo =
15323                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15324         } else {
15325                 tp->bufmgr_config.mbuf_read_dma_low_water =
15326                         DEFAULT_MB_RDMA_LOW_WATER;
15327                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15328                         DEFAULT_MB_MACRX_LOW_WATER;
15329                 tp->bufmgr_config.mbuf_high_water =
15330                         DEFAULT_MB_HIGH_WATER;
15331
15332                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15333                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15334                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15335                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15336                 tp->bufmgr_config.mbuf_high_water_jumbo =
15337                         DEFAULT_MB_HIGH_WATER_JUMBO;
15338         }
15339
15340         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15341         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15342 }
15343
15344 static char * __devinit tg3_phy_string(struct tg3 *tp)
15345 {
15346         switch (tp->phy_id & TG3_PHY_ID_MASK) {
15347         case TG3_PHY_ID_BCM5400:        return "5400";
15348         case TG3_PHY_ID_BCM5401:        return "5401";
15349         case TG3_PHY_ID_BCM5411:        return "5411";
15350         case TG3_PHY_ID_BCM5701:        return "5701";
15351         case TG3_PHY_ID_BCM5703:        return "5703";
15352         case TG3_PHY_ID_BCM5704:        return "5704";
15353         case TG3_PHY_ID_BCM5705:        return "5705";
15354         case TG3_PHY_ID_BCM5750:        return "5750";
15355         case TG3_PHY_ID_BCM5752:        return "5752";
15356         case TG3_PHY_ID_BCM5714:        return "5714";
15357         case TG3_PHY_ID_BCM5780:        return "5780";
15358         case TG3_PHY_ID_BCM5755:        return "5755";
15359         case TG3_PHY_ID_BCM5787:        return "5787";
15360         case TG3_PHY_ID_BCM5784:        return "5784";
15361         case TG3_PHY_ID_BCM5756:        return "5722/5756";
15362         case TG3_PHY_ID_BCM5906:        return "5906";
15363         case TG3_PHY_ID_BCM5761:        return "5761";
15364         case TG3_PHY_ID_BCM5718C:       return "5718C";
15365         case TG3_PHY_ID_BCM5718S:       return "5718S";
15366         case TG3_PHY_ID_BCM57765:       return "57765";
15367         case TG3_PHY_ID_BCM5719C:       return "5719C";
15368         case TG3_PHY_ID_BCM5720C:       return "5720C";
15369         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
15370         case 0:                 return "serdes";
15371         default:                return "unknown";
15372         }
15373 }
15374
15375 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15376 {
15377         if (tg3_flag(tp, PCI_EXPRESS)) {
15378                 strcpy(str, "PCI Express");
15379                 return str;
15380         } else if (tg3_flag(tp, PCIX_MODE)) {
15381                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15382
15383                 strcpy(str, "PCIX:");
15384
15385                 if ((clock_ctrl == 7) ||
15386                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15387                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15388                         strcat(str, "133MHz");
15389                 else if (clock_ctrl == 0)
15390                         strcat(str, "33MHz");
15391                 else if (clock_ctrl == 2)
15392                         strcat(str, "50MHz");
15393                 else if (clock_ctrl == 4)
15394                         strcat(str, "66MHz");
15395                 else if (clock_ctrl == 6)
15396                         strcat(str, "100MHz");
15397         } else {
15398                 strcpy(str, "PCI:");
15399                 if (tg3_flag(tp, PCI_HIGH_SPEED))
15400                         strcat(str, "66MHz");
15401                 else
15402                         strcat(str, "33MHz");
15403         }
15404         if (tg3_flag(tp, PCI_32BIT))
15405                 strcat(str, ":32-bit");
15406         else
15407                 strcat(str, ":64-bit");
15408         return str;
15409 }
15410
15411 static void __devinit tg3_init_coal(struct tg3 *tp)
15412 {
15413         struct ethtool_coalesce *ec = &tp->coal;
15414
15415         memset(ec, 0, sizeof(*ec));
15416         ec->cmd = ETHTOOL_GCOALESCE;
15417         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15418         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15419         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15420         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15421         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15422         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15423         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15424         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15425         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15426
15427         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15428                                  HOSTCC_MODE_CLRTICK_TXBD)) {
15429                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15430                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15431                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15432                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15433         }
15434
15435         if (tg3_flag(tp, 5705_PLUS)) {
15436                 ec->rx_coalesce_usecs_irq = 0;
15437                 ec->tx_coalesce_usecs_irq = 0;
15438                 ec->stats_block_coalesce_usecs = 0;
15439         }
15440 }
15441
/* tg3_init_one(): PCI probe routine.
 *
 * Enables and maps the device, discovers chip capabilities
 * (tg3_get_invariants), configures DMA masks and netdev features,
 * runs the DMA engine self-test, and registers the net_device.
 * Returns 0 on success or a negative errno; on failure every resource
 * acquired so far is released via the chained error labels at the
 * bottom (each label falls through to the next).
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
                                  const struct pci_device_id *ent)
{
        struct net_device *dev;
        struct tg3 *tp;
        int i, err, pm_cap;
        u32 sndmbx, rcvmbx, intmbx;
        char str[40];
        u64 dma_mask, persist_dma_mask;
        netdev_features_t features = 0;

        printk_once(KERN_INFO "%s\n", version);

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
                return err;
        }

        err = pci_request_regions(pdev, DRV_MODULE_NAME);
        if (err) {
                dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
                goto err_out_disable_pdev;
        }

        pci_set_master(pdev);

        /* Find power-management capability. */
        pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
        if (pm_cap == 0) {
                dev_err(&pdev->dev,
                        "Cannot find Power Management capability, aborting\n");
                err = -EIO;
                goto err_out_free_res;
        }

        /* Make sure the chip is in full-power state before touching it. */
        err = pci_set_power_state(pdev, PCI_D0);
        if (err) {
                dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
                goto err_out_free_res;
        }

        dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
        if (!dev) {
                err = -ENOMEM;
                goto err_out_power_down;
        }

        SET_NETDEV_DEV(dev, &pdev->dev);

        tp = netdev_priv(dev);
        tp->pdev = pdev;
        tp->dev = dev;
        tp->pm_cap = pm_cap;
        tp->rx_mode = TG3_DEF_RX_MODE;
        tp->tx_mode = TG3_DEF_TX_MODE;

        if (tg3_debug > 0)
                tp->msg_enable = tg3_debug;
        else
                tp->msg_enable = TG3_DEF_MSG_ENABLE;

        /* The word/byte swap controls here control register access byte
         * swapping.  DMA data byte swapping is controlled in the GRC_MODE
         * setting below.
         */
        tp->misc_host_ctrl =
                MISC_HOST_CTRL_MASK_PCI_INT |
                MISC_HOST_CTRL_WORD_SWAP |
                MISC_HOST_CTRL_INDIR_ACCESS |
                MISC_HOST_CTRL_PCISTATE_RW;

        /* The NONFRM (non-frame) byte/word swap controls take effect
         * on descriptor entries, anything which isn't packet data.
         *
         * The StrongARM chips on the board (one for tx, one for rx)
         * are running in big-endian mode.
         */
        tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
                        GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
        tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
        spin_lock_init(&tp->lock);
        spin_lock_init(&tp->indirect_lock);
        INIT_WORK(&tp->reset_task, tg3_reset_task);

        tp->regs = pci_ioremap_bar(pdev, BAR_0);
        if (!tp->regs) {
                dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
                err = -ENOMEM;
                goto err_out_free_dev;
        }

        /* These device IDs carry an APE (Application Processing Engine);
         * map its register window (BAR 2) as well.
         */
        if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
            tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
                tg3_flag_set(tp, ENABLE_APE);
                tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
                if (!tp->aperegs) {
                        dev_err(&pdev->dev,
                                "Cannot map APE registers, aborting\n");
                        err = -ENOMEM;
                        goto err_out_iounmap;
                }
        }

        tp->rx_pending = TG3_DEF_RX_RING_PENDING;
        tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

        dev->ethtool_ops = &tg3_ethtool_ops;
        dev->watchdog_timeo = TG3_TX_TIMEOUT;
        dev->netdev_ops = &tg3_netdev_ops;
        dev->irq = pdev->irq;

        err = tg3_get_invariants(tp);
        if (err) {
                dev_err(&pdev->dev,
                        "Problem fetching invariants of chip, aborting\n");
                goto err_out_apeunmap;
        }

        /* The EPB bridge inside 5714, 5715, and 5780 and any
         * device behind the EPB cannot support DMA addresses > 40-bit.
         * On 64-bit systems with IOMMU, use 40-bit dma_mask.
         * On 64-bit systems without IOMMU, use 64-bit dma_mask and
         * do DMA address check in tg3_start_xmit().
         */
        if (tg3_flag(tp, IS_5788))
                persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
        else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
                persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
                dma_mask = DMA_BIT_MASK(64);
#endif
        } else
                persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

        /* Configure DMA attributes. */
        if (dma_mask > DMA_BIT_MASK(32)) {
                err = pci_set_dma_mask(pdev, dma_mask);
                if (!err) {
                        features |= NETIF_F_HIGHDMA;
                        err = pci_set_consistent_dma_mask(pdev,
                                                          persist_dma_mask);
                        if (err < 0) {
                                dev_err(&pdev->dev, "Unable to obtain 64 bit "
                                        "DMA for consistent allocations\n");
                                goto err_out_apeunmap;
                        }
                }
        }
        /* Fall back to a 32-bit mask if the wide mask was rejected. */
        if (err || dma_mask == DMA_BIT_MASK(32)) {
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev,
                                "No usable DMA configuration, aborting\n");
                        goto err_out_apeunmap;
                }
        }

        tg3_init_bufmgr_config(tp);

        features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

        /* 5700 B0 chips do not support checksumming correctly due
         * to hardware bugs.
         */
        if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
                features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

                if (tg3_flag(tp, 5755_PLUS))
                        features |= NETIF_F_IPV6_CSUM;
        }

        /* TSO is on by default on chips that support hardware TSO.
         * Firmware TSO on older chips gives lower performance, so it
         * is off by default, but can be enabled using ethtool.
         */
        if ((tg3_flag(tp, HW_TSO_1) ||
             tg3_flag(tp, HW_TSO_2) ||
             tg3_flag(tp, HW_TSO_3)) &&
            (features & NETIF_F_IP_CSUM))
                features |= NETIF_F_TSO;
        if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
                if (features & NETIF_F_IPV6_CSUM)
                        features |= NETIF_F_TSO6;
                if (tg3_flag(tp, HW_TSO_3) ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
                     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
                        features |= NETIF_F_TSO_ECN;
        }

        dev->features |= features;
        dev->vlan_features |= features;

        /*
         * Add loopback capability only for a subset of devices that support
         * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
         * loopback for the remaining devices.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
            !tg3_flag(tp, CPMU_PRESENT))
                /* Add the loopback capability */
                features |= NETIF_F_LOOPBACK;

        dev->hw_features |= features;

        if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
            !tg3_flag(tp, TSO_CAPABLE) &&
            !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
                tg3_flag_set(tp, MAX_RXPEND_64);
                tp->rx_pending = 63;
        }

        err = tg3_get_device_address(tp);
        if (err) {
                dev_err(&pdev->dev,
                        "Could not obtain valid ethernet address, aborting\n");
                goto err_out_apeunmap;
        }

        /*
         * Reset chip in case UNDI or EFI driver did not shutdown
         * DMA self test will enable WDMAC and we'll see (spurious)
         * pending DMA on the PCI bus at that point.
         */
        if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
            (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
                tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        }

        err = tg3_test_dma(tp);
        if (err) {
                dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
                goto err_out_apeunmap;
        }

        /* Assign interrupt/consumer/producer mailbox addresses to each
         * NAPI context.
         */
        intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
        rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
        sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
        for (i = 0; i < tp->irq_max; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tnapi->tp = tp;
                tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

                tnapi->int_mbox = intmbx;
                if (i <= 4)
                        intmbx += 0x8;
                else
                        intmbx += 0x4;

                tnapi->consmbox = rcvmbx;
                tnapi->prodmbox = sndmbx;

                if (i)
                        tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
                else
                        tnapi->coal_now = HOSTCC_MODE_NOW;

                if (!tg3_flag(tp, SUPPORT_MSIX))
                        break;

                /*
                 * If we support MSIX, we'll be using RSS.  If we're using
                 * RSS, the first vector only handles link interrupts and the
                 * remaining vectors handle rx and tx interrupts.  Reuse the
                 * mailbox values for the next iteration.  The values we setup
                 * above are still useful for the single vectored mode.
                 */
                if (!i)
                        continue;

                rcvmbx += 0x8;

                if (sndmbx & 0x4)
                        sndmbx -= 0x4;
                else
                        sndmbx += 0xc;
        }

        tg3_init_coal(tp);

        pci_set_drvdata(pdev, dev);

        if (tg3_flag(tp, 5717_PLUS)) {
                /* Resume a low-power mode */
                tg3_frob_aux_power(tp, false);
        }

        err = register_netdev(dev);
        if (err) {
                dev_err(&pdev->dev, "Cannot register net device, aborting\n");
                goto err_out_apeunmap;
        }

        netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
                    tp->board_part_number,
                    tp->pci_chip_rev_id,
                    tg3_bus_string(tp, str),
                    dev->dev_addr);

        if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
                struct phy_device *phydev;
                phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
                netdev_info(dev,
                            "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
                            phydev->drv->name, dev_name(&phydev->dev));
        } else {
                char *ethtype;

                if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
                        ethtype = "10/100Base-TX";
                else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
                        ethtype = "1000Base-SX";
                else
                        ethtype = "10/100/1000Base-T";

                netdev_info(dev, "attached PHY is %s (%s Ethernet) "
                            "(WireSpeed[%d], EEE[%d])\n",
                            tg3_phy_string(tp), ethtype,
                            (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
                            (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
        }

        netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
                    (dev->features & NETIF_F_RXCSUM) != 0,
                    tg3_flag(tp, USE_LINKCHG_REG) != 0,
                    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
                    tg3_flag(tp, ENABLE_ASF) != 0,
                    tg3_flag(tp, TSO_CAPABLE) != 0);
        netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
                    tp->dma_rwctrl,
                    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
                    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

        /* Save config space for restore by the PCI error-recovery path
         * (tg3_io_slot_reset calls pci_restore_state).
         */
        pci_save_state(pdev);

        return 0;

        /* Error unwind: each label releases one resource and falls
         * through to the next, undoing the setup above in reverse order.
         */
err_out_apeunmap:
        if (tp->aperegs) {
                iounmap(tp->aperegs);
                tp->aperegs = NULL;
        }

err_out_iounmap:
        if (tp->regs) {
                iounmap(tp->regs);
                tp->regs = NULL;
        }

err_out_free_dev:
        free_netdev(dev);

err_out_power_down:
        pci_set_power_state(pdev, PCI_D3hot);

err_out_free_res:
        pci_release_regions(pdev);

err_out_disable_pdev:
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        return err;
}
15818
15819 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15820 {
15821         struct net_device *dev = pci_get_drvdata(pdev);
15822
15823         if (dev) {
15824                 struct tg3 *tp = netdev_priv(dev);
15825
15826                 if (tp->fw)
15827                         release_firmware(tp->fw);
15828
15829                 tg3_reset_task_cancel(tp);
15830
15831                 if (tg3_flag(tp, USE_PHYLIB)) {
15832                         tg3_phy_fini(tp);
15833                         tg3_mdio_fini(tp);
15834                 }
15835
15836                 unregister_netdev(dev);
15837                 if (tp->aperegs) {
15838                         iounmap(tp->aperegs);
15839                         tp->aperegs = NULL;
15840                 }
15841                 if (tp->regs) {
15842                         iounmap(tp->regs);
15843                         tp->regs = NULL;
15844                 }
15845                 free_netdev(dev);
15846                 pci_release_regions(pdev);
15847                 pci_disable_device(pdev);
15848                 pci_set_drvdata(pdev, NULL);
15849         }
15850 }
15851
15852 #ifdef CONFIG_PM_SLEEP
/* System-sleep suspend handler.  Quiesces the interface (stop PHY,
 * queues, timer, interrupts), halts the chip, and prepares it for
 * power-down.  If power-down preparation fails, the hardware is
 * restarted so the device remains usable.  Returns 0 on success or a
 * negative errno from tg3_power_down_prepare().
 */
static int tg3_suspend(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);
        int err;

        /* Nothing to quiesce if the interface is down. */
        if (!netif_running(dev))
                return 0;

        tg3_reset_task_cancel(tp);
        tg3_phy_stop(tp);
        tg3_netif_stop(tp);

        del_timer_sync(&tp->timer);

        tg3_full_lock(tp, 1);
        tg3_disable_ints(tp);
        tg3_full_unlock(tp);

        netif_device_detach(dev);

        tg3_full_lock(tp, 0);
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tg3_flag_clear(tp, INIT_COMPLETE);
        tg3_full_unlock(tp);

        err = tg3_power_down_prepare(tp);
        if (err) {
                /* Suspend failed: bring the hardware and interface
                 * back up so the device keeps working.
                 */
                int err2;

                tg3_full_lock(tp, 0);

                tg3_flag_set(tp, INIT_COMPLETE);
                err2 = tg3_restart_hw(tp, 1);
                if (err2)
                        goto out;

                tp->timer.expires = jiffies + tp->timer_offset;
                add_timer(&tp->timer);

                netif_device_attach(dev);
                tg3_netif_start(tp);

out:
                tg3_full_unlock(tp);

                /* PHY restart must happen outside the full lock. */
                if (!err2)
                        tg3_phy_start(tp);
        }

        return err;
}
15906
/* System-sleep resume handler.  Reattaches the netdev, restarts the
 * hardware and driver timer, and restarts the PHY.  Returns 0 on
 * success or a negative errno from tg3_restart_hw().
 */
static int tg3_resume(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);
        int err;

        /* The interface was down at suspend; leave it down. */
        if (!netif_running(dev))
                return 0;

        netif_device_attach(dev);

        tg3_full_lock(tp, 0);

        tg3_flag_set(tp, INIT_COMPLETE);
        err = tg3_restart_hw(tp, 1);
        if (err)
                goto out;

        tp->timer.expires = jiffies + tp->timer_offset;
        add_timer(&tp->timer);

        tg3_netif_start(tp);

out:
        tg3_full_unlock(tp);

        /* PHY restart must happen outside the full lock. */
        if (!err)
                tg3_phy_start(tp);

        return err;
}
15939
/* Wire the suspend/resume handlers into dev_pm_ops; TG3_PM_OPS is
 * referenced from the pci_driver definition below and is NULL when
 * CONFIG_PM_SLEEP is disabled.
 */
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
15948
/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.  Returns PCI_ERS_RESULT_NEED_RESET
 * to request a slot reset, or PCI_ERS_RESULT_DISCONNECT when the
 * channel has permanently failed.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
                                              pci_channel_state_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

        netdev_info(netdev, "PCI I/O error detected\n");

        rtnl_lock();

        if (!netif_running(netdev))
                goto done;

        tg3_phy_stop(tp);

        tg3_netif_stop(tp);

        del_timer_sync(&tp->timer);

        /* Want to make sure that the reset task doesn't run */
        tg3_reset_task_cancel(tp);
        tg3_flag_clear(tp, TX_RECOVERY_PENDING);

        netif_device_detach(netdev);

        /* Clean up software state, even if MMIO is blocked */
        tg3_full_lock(tp, 0);
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
        tg3_full_unlock(tp);

done:
        if (state == pci_channel_io_perm_failure)
                err = PCI_ERS_RESULT_DISCONNECT;
        else
                pci_disable_device(pdev);

        rtnl_unlock();

        return err;
}
15998
/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 * Returns PCI_ERS_RESULT_RECOVERED on success, otherwise
 * PCI_ERS_RESULT_DISCONNECT.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
        int err;

        rtnl_lock();

        if (pci_enable_device(pdev)) {
                netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
                goto done;
        }

        /* Restore the config space saved at probe time, then re-save
         * it for any future recovery cycle.
         */
        pci_set_master(pdev);
        pci_restore_state(pdev);
        pci_save_state(pdev);

        if (!netif_running(netdev)) {
                rc = PCI_ERS_RESULT_RECOVERED;
                goto done;
        }

        err = tg3_power_up(tp);
        if (err)
                goto done;

        rc = PCI_ERS_RESULT_RECOVERED;

done:
        rtnl_unlock();

        return rc;
}
16042
/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.  Restarts the
 * hardware, the driver timer, the queues, and the PHY.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        int err;

        rtnl_lock();

        if (!netif_running(netdev))
                goto done;

        tg3_full_lock(tp, 0);
        tg3_flag_set(tp, INIT_COMPLETE);
        err = tg3_restart_hw(tp, 1);
        tg3_full_unlock(tp);
        if (err) {
                netdev_err(netdev, "Cannot restart hardware after reset.\n");
                goto done;
        }

        netif_device_attach(netdev);

        tp->timer.expires = jiffies + tp->timer_offset;
        add_timer(&tp->timer);

        tg3_netif_start(tp);

        tg3_phy_start(tp);

done:
        rtnl_unlock();
}
16082
/* PCI error-recovery (AER) callbacks for this driver. */
static struct pci_error_handlers tg3_err_handler = {
        .error_detected = tg3_io_error_detected,
        .slot_reset     = tg3_io_slot_reset,
        .resume         = tg3_io_resume
};
16088
/* PCI driver registration: probe/remove entry points, device ID
 * table, error handlers, and power-management operations.
 */
static struct pci_driver tg3_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = tg3_pci_tbl,
        .probe          = tg3_init_one,
        .remove         = __devexit_p(tg3_remove_one),
        .err_handler    = &tg3_err_handler,
        .driver.pm      = TG3_PM_OPS,
};
16097
/* Module load: register the PCI driver. */
static int __init tg3_init(void)
{
        return pci_register_driver(&tg3_driver);
}

/* Module unload: unregister the PCI driver. */
static void __exit tg3_cleanup(void)
{
        pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);