]> git.kernelconcepts.de Git - karo-tx-linux.git/blob - drivers/net/ethernet/broadcom/tg3.c
tg3: Disable new DMA engine for 57766
[karo-tx-linux.git] / drivers / net / ethernet / broadcom / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2011 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47
48 #include <net/checksum.h>
49 #include <net/ip.h>
50
51 #include <asm/system.h>
52 #include <linux/io.h>
53 #include <asm/byteorder.h>
54 #include <linux/uaccess.h>
55
56 #ifdef CONFIG_SPARC
57 #include <asm/idprom.h>
58 #include <asm/prom.h>
59 #endif
60
61 #define BAR_0   0
62 #define BAR_2   2
63
64 #include "tg3.h"
65
66 /* Functions & macros to verify TG3_FLAGS types */
67
/* Return nonzero if @flag is set in the driver flag bitmap @bits. */
static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}
72
/* Set @flag in the driver flag bitmap @bits. */
static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}
77
/* Clear @flag in the driver flag bitmap @bits. */
static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}
82
/* Convenience wrappers mapping a TG3_FLAG_xxx short name onto the
 * tp->tg3_flags bitmap accessors above, e.g. tg3_flag(tp, ENABLE_APE).
 */
#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
89
#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     122
/* Version string "3.122" assembled from the major/minor numbers above. */
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "December 7, 2011"

/* Reset reason codes passed to the firmware/APE state-change helpers
 * (see tg3_ape_driver_state_change()).
 */
#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
/* Default netif_msg bitmap used when the tg3_debug module parameter
 * is left at -1.
 */
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

/* Byte sizes of the descriptor rings, derived from the entry counts. */
#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
/* Next TX ring index, wrapping at TG3_TX_RING_SIZE (power of two). */
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5

/* Firmware blob names requested via request_firmware(). */
#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

/* Module metadata, firmware declarations and the single module parameter. */
static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
226
/* PCI vendor/device IDs this driver binds to.  Terminated by the empty
 * sentinel entry; exported to the module loader via MODULE_DEVICE_TABLE.
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
313
/* Names reported for `ethtool -S`.  The position of each entry is the
 * index of the corresponding counter, so the order must stay in sync
 * with the driver's stats structure (defined elsewhere) — do not
 * reorder or insert entries here without updating that structure.
 */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
398
399
/* Names of the ethtool self-tests, in the order the tests are run.
 * Keep in sync with the self-test dispatch code (defined elsewhere).
 */
static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        { "nvram test        (online) " },
        { "link test         (online) " },
        { "register test     (offline)" },
        { "memory test       (offline)" },
        { "mac loopback test (offline)" },
        { "phy loopback test (offline)" },
        { "ext loopback test (offline)" },
        { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)
414
415
/* Plain MMIO register write at offset @off (posted; no read-back). */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}
420
/* Plain MMIO register read at offset @off. */
static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}
425
/* Write @val to the APE register block at offset @off. */
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}
430
/* Read the APE register at offset @off. */
static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}
435
/* Write a chip register through the PCI config-space indirect window
 * (TG3PCI_REG_BASE_ADDR selects the register, TG3PCI_REG_DATA carries
 * the value).  indirect_lock keeps the address/data pair atomic with
 * respect to other indirect accesses.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
445
/* MMIO register write followed by a read-back of the same register,
 * which flushes the posted write out to the device.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}
451
/* Read a chip register through the PCI config-space indirect window;
 * counterpart of tg3_write_indirect_reg32().
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
463
/* Indirect-mode mailbox write.  Two mailboxes have dedicated PCI
 * config-space aliases and are written through those directly; every
 * other mailbox goes through the indirect register window at
 * off + 0x5600 (the mailbox region's offset in indirect space).
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        /* RX return ring consumer index mailbox: use its config alias. */
        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        /* Standard RX producer index mailbox: likewise. */
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}
493
/* Indirect-mode mailbox read via the register window at off + 0x5600;
 * counterpart of tg3_write_indirect_mbox().
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
505
506 /* usec_wait specifies the wait time in usec when writing to certain registers
507  * where it is unsafe to read back the register without some delay.
508  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
509  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
510  */
511 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
512 {
513         if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
514                 /* Non-posted methods */
515                 tp->write32(tp, off, val);
516         else {
517                 /* Posted method */
518                 tg3_write32(tp, off, val);
519                 if (usec_wait)
520                         udelay(usec_wait);
521                 tp->read32(tp, off);
522         }
523         /* Wait again after the read for the posted method to guarantee that
524          * the wait time is met.
525          */
526         if (usec_wait)
527                 udelay(usec_wait);
528 }
529
/* Write a mailbox register, then read it back to flush the posted
 * write — except on chips flagged MBOX_WRITE_REORDER or
 * ICH_WORKAROUND, where the read-back is skipped.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}
536
/* Write a TX mailbox.  Chips flagged TXD_MBOX_HWBUG get the value
 * written twice; chips flagged MBOX_WRITE_REORDER get a read-back to
 * flush the posted write.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER))
                readl(mbox);
}
546
/* 5906 mailbox read: mailboxes are accessed through the GRC mailbox
 * region (GRCMBOX_BASE) on this chip.
 */
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}
551
/* 5906 mailbox write through the GRC mailbox region (GRCMBOX_BASE). */
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}
556
/* Register/mailbox access shorthands; all expect a local 'tp' in scope
 * and dispatch through the per-chip accessor methods installed in it.
 */
#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)
567
/* Write @val into NIC SRAM at offset @off through the memory window.
 * On 5906, writes to the statistics block range are silently dropped
 * (tg3_read_mem() returns 0 for the same range).  Uses the PCI
 * config-space window when SRAM_USE_CONFIG is set, otherwise the MMIO
 * window registers; either way the window base is restored to 0 after
 * the access, under indirect_lock.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
592
/* Read NIC SRAM at offset @off into *@val through the memory window;
 * counterpart of tg3_write_mem().  On 5906, reads from the statistics
 * block range are not performed and *@val is set to 0.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
619
620 static void tg3_ape_lock_init(struct tg3 *tp)
621 {
622         int i;
623         u32 regbase, bit;
624
625         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
626                 regbase = TG3_APE_LOCK_GRANT;
627         else
628                 regbase = TG3_APE_PER_LOCK_GRANT;
629
630         /* Make sure the driver hasn't any stale locks. */
631         for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
632                 switch (i) {
633                 case TG3_APE_LOCK_PHY0:
634                 case TG3_APE_LOCK_PHY1:
635                 case TG3_APE_LOCK_PHY2:
636                 case TG3_APE_LOCK_PHY3:
637                         bit = APE_LOCK_GRANT_DRIVER;
638                         break;
639                 default:
640                         if (!tp->pci_fn)
641                                 bit = APE_LOCK_GRANT_DRIVER;
642                         else
643                                 bit = 1 << tp->pci_fn;
644                 }
645                 tg3_ape_write32(tp, regbase + 4 * i, bit);
646         }
647
648 }
649
/* Acquire APE lock @locknum on behalf of the driver.  Returns 0 on
 * success (including the no-APE and 5761-GPIO cases), -EBUSY if the
 * lock is not granted within ~1 ms, or -EINVAL for an unsupported
 * lock number.
 */
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                /* 5761 has no GPIO lock; nothing to acquire. */
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return 0;
                /* Fall through - GPIO shares the request-bit scheme below. */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                /* PCI function 0 uses the DRIVER bit; other functions use
                 * the bit matching their function number.
                 */
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        default:
                return -EINVAL;
        }

        /* 5761 uses the legacy lock registers; later chips have
         * per-function register sets.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}
702
/* Release APE lock @locknum previously taken with tg3_ape_lock().
 * No-op when the APE is disabled, for the 5761 GPIO case, or for an
 * unsupported lock number.
 */
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                /* 5761 has no GPIO lock. */
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return;
                /* Fall through - same ownership-bit scheme as GRC/MEM. */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                /* PCI function 0 owns the DRIVER bit; other functions own
                 * the bit matching their function number.
                 */
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        default:
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        /* Writing our ownership bit to the grant register releases it. */
        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
732
/* Post @event to the APE firmware.  Silently returns if the interface
 * is NCSI-based, the APE segment signature is absent, or the firmware
 * is not ready.  Waits up to ~1 ms (10 x 100 us) for any previous
 * event to be consumed before queueing the new one and ringing the
 * doorbell.
 */
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int i;
        u32 apedata;

        /* NCSI does not support APE events */
        if (tg3_flag(tp, APE_HAS_NCSI))
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        for (i = 0; i < 10; i++) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                /* Only queue the new event once no previous one is pending. */
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                                        event | APE_EVENT_STATUS_EVENT_PENDING);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(100);
        }

        /* Ring the doorbell only if the event was actually queued above. */
        if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
772
/* Mirror a driver state transition (init/shutdown/suspend) into the
 * APE shared registers and send the corresponding event to the APE
 * firmware.  No-op unless the APE is enabled.
 */
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                /* Publish the host segment signature/length, bump the
                 * init counter, and identify this driver to the APE.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state.  Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        /* WoL armed: record auto speed selection and the
                         * WOL driver state for the APE.
                         */
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        case RESET_KIND_SUSPEND:
                event = APE_EVENT_STATUS_STATE_SUSPEND;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}
829
/* Disable chip interrupts: mask the PCI interrupt line in the misc
 * host control register, then write 1 to every vector's interrupt
 * mailbox (a nonzero mailbox value blocks that vector on tg3 --
 * NOTE(review): inferred from usage here and in tg3_enable_ints).
 */
static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
839
/* Re-enable chip interrupts on all vectors: unmask the PCI interrupt
 * line, ack each vector up to its last processed tag, then kick the
 * coalescing engine (or force an interrupt if work is already pending).
 */
static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        /* Make the irq_sync clear visible before interrupts can fire. */
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                /* 1-shot MSI mode requires the mailbox to be written a
                 * second time -- NOTE(review): presumably a chip quirk;
                 * kept exactly as the original code did it.
                 */
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
870
871 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
872 {
873         struct tg3 *tp = tnapi->tp;
874         struct tg3_hw_status *sblk = tnapi->hw_status;
875         unsigned int work_exists = 0;
876
877         /* check for phy events */
878         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
879                 if (sblk->status & SD_STATUS_LINK_CHG)
880                         work_exists = 1;
881         }
882         /* check for RX/TX work to do */
883         if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
884             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
885                 work_exists = 1;
886
887         return work_exists;
888 }
889
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        /* Ack processed work by writing last_tag to the vector's mailbox. */
        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        /* Order the mailbox write before any subsequent MMIO. */
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
910
/* Switch the chip core clock back to its normal source.  No-op on
 * CPMU-equipped and 5780-class chips, which manage clocking themselves.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        /* Preserve only the CLKRUN bits and the low 5 bits of the register. */
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                /* Step down from the 44MHz core clock in two writes:
                 * first with both 44MHZ_CORE and ALTCLK set, then with
                 * ALTCLK alone, before the final value below.
                 */
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
943
/* Maximum number of 10us polls of MI_COM busy (~50ms) before -EBUSY. */
#define PHY_BUSY_LOOPS  5000

/* Read PHY register @reg over the MI (MDIO) interface into *@val.
 * Temporarily disables MI auto-polling if it is active and restores it
 * afterwards.  Returns 0 on success or -EBUSY on timeout; *@val is 0
 * on failure.
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        /* Compose the MI read frame: PHY address, register, read command. */
        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        /* Re-read after a short settle for stable data. */
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        /* Restore auto-polling if we turned it off above. */
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
994
/* Write @val to PHY register @reg over the MI (MDIO) interface.
 * Temporarily disables MI auto-polling if it is active and restores it
 * afterwards.  Returns 0 on success or -EBUSY on timeout.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        /* Writes to MII_CTRL1000/MII_TG3_AUX_CTRL are deliberately
         * skipped (and reported as success) on FET-style PHYs.
         */
        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        /* Compose the MI write frame: address, register, data, command. */
        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        /* Restore auto-polling if we turned it off above. */
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}
1043
1044 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1045 {
1046         int err;
1047
1048         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1049         if (err)
1050                 goto done;
1051
1052         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1053         if (err)
1054                 goto done;
1055
1056         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1057                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1058         if (err)
1059                 goto done;
1060
1061         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1062
1063 done:
1064         return err;
1065 }
1066
1067 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1068 {
1069         int err;
1070
1071         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1072         if (err)
1073                 goto done;
1074
1075         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1076         if (err)
1077                 goto done;
1078
1079         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1080                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1081         if (err)
1082                 goto done;
1083
1084         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1085
1086 done:
1087         return err;
1088 }
1089
1090 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1091 {
1092         int err;
1093
1094         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1095         if (!err)
1096                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1097
1098         return err;
1099 }
1100
1101 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1102 {
1103         int err;
1104
1105         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1106         if (!err)
1107                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1108
1109         return err;
1110 }
1111
1112 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1113 {
1114         int err;
1115
1116         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1117                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1118                            MII_TG3_AUXCTL_SHDWSEL_MISC);
1119         if (!err)
1120                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1121
1122         return err;
1123 }
1124
1125 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1126 {
1127         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1128                 set |= MII_TG3_AUXCTL_MISC_WREN;
1129
1130         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1131 }
1132
/* Enable shadow-mode DSP access (with 6dB TX coding) through the
 * AUXCTL shadow register.
 */
#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

/* Disable shadow-mode DSP access.  The expansion deliberately has no
 * trailing semicolon: the previous stray ';' made "MACRO();" expand to
 * two statements and broke use in expression contexts (e.g. after an
 * unbraced if/else).
 */
#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)
1141
/* Reset the PHY by setting BMCR_RESET and polling (up to ~50ms) for
 * the bit to self-clear.  Returns 0 on success, -EBUSY on MDIO error
 * or timeout.
 */
static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        /* limit only goes negative when the loop ran to exhaustion. */
        if (limit < 0)
                return -EBUSY;

        return 0;
}
1172
1173 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1174 {
1175         struct tg3 *tp = bp->priv;
1176         u32 val;
1177
1178         spin_lock_bh(&tp->lock);
1179
1180         if (tg3_readphy(tp, reg, &val))
1181                 val = -EIO;
1182
1183         spin_unlock_bh(&tp->lock);
1184
1185         return val;
1186 }
1187
1188 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1189 {
1190         struct tg3 *tp = bp->priv;
1191         u32 ret = 0;
1192
1193         spin_lock_bh(&tp->lock);
1194
1195         if (tg3_writephy(tp, reg, val))
1196                 ret = -EIO;
1197
1198         spin_unlock_bh(&tp->lock);
1199
1200         return ret;
1201 }
1202
/* phylib mii_bus reset callback: intentionally a no-op -- the driver
 * resets the PHY through its own paths (e.g. tg3_bmcr_reset).
 */
static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}
1207
/* Program the 5785 MAC's PHY interface registers (LED modes, clock
 * timeouts and, for RGMII, in-band status signalling) to match the
 * attached PHY model.  Unknown PHYs are left untouched.
 */
static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        /* Pick the LED mode configuration for the detected PHY model. */
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        /* Non-RGMII interfaces only need LED modes and clock timeouts. */
        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        /* RGMII: enable in-band status unless it is explicitly disabled. */
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        /* Clear all RGMII mode bits, then re-enable the ones requested
         * by the external in-band RX/TX flags.
         */
        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}
1288
/* Disable MI auto-polling so the driver can issue MDIO transactions
 * directly, and reapply the 5785 PHY interface setup if the mdio bus
 * has already been initialized.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}
1299
/* Set up MDIO/PHY access.  Determines the PHY address (derived from
 * the PCI function number on 5717-plus parts, fixed otherwise) and,
 * when phylib is in use, allocates and registers an mii_bus and
 * applies PHY-model-specific flags.  Returns 0 or a negative errno.
 */
static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

                tp->phy_addr = tp->pci_fn + 1;

                if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                /* Serdes devices use a PHY address 7 higher than the
                 * copper device of the same function.
                 */
                if (is_serdes)
                        tp->phy_addr += 7;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        /* Without phylib (or if already initialized) there is no bus
         * to register.
         */
        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
         * quickest way to bring the device back to an operational state..
         */
        if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
                tg3_bmcr_reset(tp);

        i = mdiobus_register(tp->mdio_bus);
        if (i) {
                dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
                mdiobus_free(tp->mdio_bus);
                return i;
        }

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        if (!phydev || !phydev->drv) {
                dev_warn(&tp->pdev->dev, "No PHY devices\n");
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
                return -ENODEV;
        }

        /* Apply interface mode and quirk flags for the PHY model found. */
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM57780:
                phydev->interface = PHY_INTERFACE_MODE_GMII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                break;
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
                                     PHY_BRCM_RX_REFCLK_UNUSED |
                                     PHY_BRCM_DIS_TXCRXC_NOENRGY |
                                     PHY_BRCM_AUTO_PWRDWN_ENABLE;
                if (tg3_flag(tp, RGMII_INBAND_DISABLE))
                        phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
                /* fallthru */
        case PHY_ID_RTL8211C:
                phydev->interface = PHY_INTERFACE_MODE_RGMII;
                break;
        case PHY_ID_RTL8201E:
        case PHY_ID_BCMAC131:
                phydev->interface = PHY_INTERFACE_MODE_MII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                tp->phy_flags |= TG3_PHYFLG_IS_FET;
                break;
        }

        tg3_flag_set(tp, MDIOBUS_INITED);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);

        return 0;
}
1404
1405 static void tg3_mdio_fini(struct tg3 *tp)
1406 {
1407         if (tg3_flag(tp, MDIOBUS_INITED)) {
1408                 tg3_flag_clear(tp, MDIOBUS_INITED);
1409                 mdiobus_unregister(tp->mdio_bus);
1410                 mdiobus_free(tp->mdio_bus);
1411         }
1412 }
1413
/* tp->lock is held. */
/* Signal the firmware by raising the driver-event bit in
 * GRC_RX_CPU_EVENT, and timestamp the event so that
 * tg3_wait_for_event_ack() can bound its wait.
 */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
        u32 val;

        val = tr32(GRC_RX_CPU_EVENT);
        val |= GRC_RX_CPU_DRIVER_EVENT;
        tw32_f(GRC_RX_CPU_EVENT, val);

        tp->last_event_jiffies = jiffies;
}
1425
/* Maximum time (us) to wait for firmware to ack a driver event. */
#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
/* Poll until the firmware clears GRC_RX_CPU_DRIVER_EVENT (i.e. the
 * previous event was consumed) or the remaining portion of the event
 * timeout expires.  Returns immediately if the timeout has already
 * elapsed since the last event.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
        int i;
        unsigned int delay_cnt;
        long time_remain;

        /* If enough time has passed, no wait is necessary. */
        time_remain = (long)(tp->last_event_jiffies + 1 +
                      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
                      (long)jiffies;
        if (time_remain < 0)
                return;

        /* Check if we can shorten the wait time. */
        delay_cnt = jiffies_to_usecs(time_remain);
        if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
                delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
        /* Convert the remaining microseconds into 8us poll iterations. */
        delay_cnt = (delay_cnt >> 3) + 1;

        for (i = 0; i < delay_cnt; i++) {
                if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
                        break;
                udelay(8);
        }
}
1454
/* tp->lock is held. */
/* Forward the current MII link registers (BMCR/BMSR, local/partner
 * advertisement, 1000BASE-T control/status, PHY address) to management
 * firmware through the SRAM command mailbox, then raise a firmware
 * event.  Only applies to 5780-class chips with ASF enabled.
 */
static void tg3_ump_link_report(struct tg3 *tp)
{
        u32 reg;
        u32 val;

        if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
                return;

        tg3_wait_for_event_ack(tp);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

        /* Payload length value the firmware expects for a link update. */
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

        /* Word 0: BMCR in the high half, BMSR in the low half. */
        val = 0;
        if (!tg3_readphy(tp, MII_BMCR, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_BMSR, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

        /* Word 1: local and link-partner autoneg advertisement. */
        val = 0;
        if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_LPA, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

        /* Word 2: 1000BASE-T control/status (not read on MII serdes). */
        val = 0;
        if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
                if (!tg3_readphy(tp, MII_CTRL1000, &reg))
                        val = reg << 16;
                if (!tg3_readphy(tp, MII_STAT1000, &reg))
                        val |= (reg & 0xffff);
        }
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

        /* Word 3: PHY address register, or 0 if unreadable. */
        if (!tg3_readphy(tp, MII_PHYADDR, &reg))
                val = reg << 16;
        else
                val = 0;
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

        tg3_generate_fw_event(tp);
}
1501
/* tp->lock is held. */
/* Ask the ASF firmware to pause: wait for the previous event to be
 * acked, post FWCMD_NICDRV_PAUSE_FW, then wait for this event's ack.
 * Skipped when the APE manages the firmware instead.
 */
static void tg3_stop_fw(struct tg3 *tp)
{
        if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
                /* Wait for RX cpu to ACK the previous event. */
                tg3_wait_for_event_ack(tp);

                tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

                tg3_generate_fw_event(tp);

                /* Wait for RX cpu to ACK this event. */
                tg3_wait_for_event_ack(tp);
        }
}
1517
/* tp->lock is held. */
/* Leave a pre-reset breadcrumb for firmware: write the magic value to
 * the firmware mailbox and, with the new ASF handshake, record which
 * kind of reset is about to happen.  Init and suspend resets are also
 * reported to the APE.
 */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
        tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
                      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

        if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
                switch (kind) {
                case RESET_KIND_INIT:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_START);
                        break;

                case RESET_KIND_SHUTDOWN:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_UNLOAD);
                        break;

                case RESET_KIND_SUSPEND:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_SUSPEND);
                        break;

                default:
                        break;
                }
        }

        if (kind == RESET_KIND_INIT ||
            kind == RESET_KIND_SUSPEND)
                tg3_ape_driver_state_change(tp, kind);
}
1550
/* tp->lock is held. */
/* Record the completed reset state for firmware (new ASF handshake
 * only).  Shutdown resets are also reported to the APE here.
 */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
        if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
                switch (kind) {
                case RESET_KIND_INIT:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_START_DONE);
                        break;

                case RESET_KIND_SHUTDOWN:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_UNLOAD_DONE);
                        break;

                default:
                        break;
                }
        }

        if (kind == RESET_KIND_SHUTDOWN)
                tg3_ape_driver_state_change(tp, kind);
}
1574
/* tp->lock is held. */
/* Legacy (pre-handshake) ASF signalling: just record the driver state
 * in the SRAM state mailbox.
 */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
        if (tg3_flag(tp, ENABLE_ASF)) {
                switch (kind) {
                case RESET_KIND_INIT:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_START);
                        break;

                case RESET_KIND_SHUTDOWN:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_UNLOAD);
                        break;

                case RESET_KIND_SUSPEND:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_SUSPEND);
                        break;

                default:
                        break;
                }
        }
}
1600
/* Wait for bootcode/firmware initialization after a reset.  5906 parts
 * expose an explicit VCPU init-done bit (20ms limit, -ENODEV on
 * timeout); other chips flip the firmware mailbox to the complement of
 * the magic value (1s limit).  A mailbox timeout is NOT fatal -- some
 * boards legitimately ship without firmware -- and is only logged once.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
        int i;
        u32 val;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                /* Wait up to 20ms for init done. */
                for (i = 0; i < 200; i++) {
                        if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
                                return 0;
                        udelay(100);
                }
                return -ENODEV;
        }

        /* Wait for firmware initialization to complete. */
        for (i = 0; i < 100000; i++) {
                tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
                if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
                        break;
                udelay(10);
        }

        /* Chip might not be fitted with firmware.  Some Sun onboard
         * parts are configured like that.  So don't signal the timeout
         * of the above loop as an error, but do report the lack of
         * running firmware once.
         */
        if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
                tg3_flag_set(tp, NO_FWARE_REPORTED);

                netdev_info(tp->dev, "No firmware running\n");
        }

        if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
                /* The 57765 A0 needs a little more
                 * time to do some important work.
                 */
                mdelay(10);
        }

        return 0;
}
1644
1645 static void tg3_link_report(struct tg3 *tp)
1646 {
1647         if (!netif_carrier_ok(tp->dev)) {
1648                 netif_info(tp, link, tp->dev, "Link is down\n");
1649                 tg3_ump_link_report(tp);
1650         } else if (netif_msg_link(tp)) {
1651                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1652                             (tp->link_config.active_speed == SPEED_1000 ?
1653                              1000 :
1654                              (tp->link_config.active_speed == SPEED_100 ?
1655                               100 : 10)),
1656                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1657                              "full" : "half"));
1658
1659                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1660                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1661                             "on" : "off",
1662                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1663                             "on" : "off");
1664
1665                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1666                         netdev_info(tp->dev, "EEE is %s\n",
1667                                     tp->setlpicnt ? "enabled" : "disabled");
1668
1669                 tg3_ump_link_report(tp);
1670         }
1671 }
1672
1673 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1674 {
1675         u16 miireg;
1676
1677         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1678                 miireg = ADVERTISE_1000XPAUSE;
1679         else if (flow_ctrl & FLOW_CTRL_TX)
1680                 miireg = ADVERTISE_1000XPSE_ASYM;
1681         else if (flow_ctrl & FLOW_CTRL_RX)
1682                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1683         else
1684                 miireg = 0;
1685
1686         return miireg;
1687 }
1688
1689 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1690 {
1691         u8 cap = 0;
1692
1693         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1694                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1695         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1696                 if (lcladv & ADVERTISE_1000XPAUSE)
1697                         cap = FLOW_CTRL_RX;
1698                 if (rmtadv & ADVERTISE_1000XPAUSE)
1699                         cap = FLOW_CTRL_TX;
1700         }
1701
1702         return cap;
1703 }
1704
/* Resolve the pause (flow control) configuration for the current link
 * and program the MAC RX/TX mode registers accordingly.
 *
 * @lcladv: locally advertised pause bits
 * @rmtadv: link partner's advertised pause bits
 *
 * When autoneg is enabled and PAUSE_AUTONEG is set, the active flow
 * control is negotiated from the two advertisements; otherwise the
 * user-configured setting is applied verbatim.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	/* With phylib, the autoneg state lives in the phy_device. */
	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		/* Serdes links use 1000BASE-X pause bits, copper uses
		 * the MII encoding.
		 */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	/* Only touch the hardware when the mode actually changed. */
	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
1743
/* phylib adjust_link callback: mirror the PHY's negotiated state into
 * the MAC (port mode, duplex, flow control, MI status polling, TX
 * timing parameters) and emit a link report when anything
 * user-visible changed.  Runs under tp->lock (BH-disabled).
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	/* Start from the current mode with port/duplex bits cleared. */
	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		/* Select MII vs GMII port mode based on speed; the 5785
		 * is the exception for speeds other than 10/100/1000.
		 */
		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Full duplex: gather both sides' pause bits for
			 * flow control resolution below.
			 */
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	/* 1000HD needs a longer slot time (0xff vs 32). */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Report when link state, speed, duplex or flow control moved. */
	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	/* Log outside the lock. */
	if (linkmesg)
		tg3_link_report(tp);
}
1827
/* Connect the net_device to its PHY via phylib and trim the PHY's
 * supported feature mask down to what this MAC can do.
 *
 * Returns 0 on success (or if already connected), the phy_connect()
 * error, or -EINVAL for an unsupported PHY interface mode.
 */
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* 10/100-only hardware falls through to the MII mask. */
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	/* Advertise everything we support. */
	phydev->advertising = phydev->supported;

	return 0;
}
1875
/* (Re)start the phylib state machine.  When coming out of low-power
 * mode, first restore the link parameters that were saved before the
 * device was powered down, then restart autonegotiation with them.
 */
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		/* Restore the pre-suspend link configuration. */
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
1897
1898 static void tg3_phy_stop(struct tg3 *tp)
1899 {
1900         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1901                 return;
1902
1903         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1904 }
1905
1906 static void tg3_phy_fini(struct tg3 *tp)
1907 {
1908         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1909                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1910                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1911         }
1912 }
1913
1914 static int tg3_phy_set_extloopbk(struct tg3 *tp)
1915 {
1916         int err;
1917         u32 val;
1918
1919         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
1920                 return 0;
1921
1922         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1923                 /* Cannot do read-modify-write on 5401 */
1924                 err = tg3_phy_auxctl_write(tp,
1925                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1926                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
1927                                            0x4c20);
1928                 goto done;
1929         }
1930
1931         err = tg3_phy_auxctl_read(tp,
1932                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1933         if (err)
1934                 return err;
1935
1936         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
1937         err = tg3_phy_auxctl_write(tp,
1938                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
1939
1940 done:
1941         return err;
1942 }
1943
/* Enable/disable Auto Power-Down on FET-style PHYs by flipping the
 * APD bit in the shadowed AUXSTAT2 register.
 */
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		/* Expose the shadow register bank. */
		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		/* Restore the test register, hiding the shadow bank. */
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}
1963
/* Enable/disable PHY Auto Power-Down.  No-op on pre-5705 chips and on
 * MII-serdes 5717+ devices; FET PHYs use their own shadow-register
 * sequence.
 */
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	/* Program the SCR5 power-saving control bits. */
	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	/* DLL auto power-down, except when enabling APD on the 5784. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	/* Set the APD wake-up timer and optionally enable APD itself. */
	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
1998
/* Enable/disable automatic MDI crossover (auto-MDIX).  No-op on
 * pre-5705 chips and serdes links.  FET PHYs use the shadowed
 * MISCCTRL register; other PHYs go through the AUXCTL MISC shadow.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			/* Expose the shadow register bank. */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			/* Restore the test register, hiding the shadows. */
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
2039
2040 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2041 {
2042         int ret;
2043         u32 val;
2044
2045         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2046                 return;
2047
2048         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2049         if (!ret)
2050                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2051                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2052 }
2053
/* Copy factory calibration fields from the chip's OTP word (saved in
 * tp->phy_otp at probe time) into the corresponding PHY DSP
 * registers.  No-op when there is no OTP data or the DSP cannot be
 * enabled.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
		return;

	/* AGC target. */
	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	/* High-pass filter settings. */
	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	/* Low-pass filter disable + ADC clock adjust. */
	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	/* VDAC trim. */
	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	/* 10BASE-T amplitude. */
	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	/* Resistor offsets. */
	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Release the DSP access enabled above. */
	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
}
2090
/* Re-evaluate Energy Efficient Ethernet state after a link change.
 * Arms tp->setlpicnt when the negotiated link supports EEE with the
 * link partner; otherwise clears it and forces LPI mode off.
 */
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	/* EEE only applies to autonegotiated full-duplex 100/1000 links. */
	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		/* LPI exit timer depends on link speed. */
		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		/* Read the clause-45 EEE resolution status. */
		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		if (current_link_up == 1 &&
		   !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}

		/* No EEE on this link: make sure LPI is disabled. */
		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
2133
/* Enable EEE low-power-idle mode, first applying the DSP TAP26
 * workaround needed by 5717/5719/57765-class chips at gigabit speed.
 */
static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     tg3_flag(tp, 57765_CLASS)) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		val = MII_TG3_DSP_TAP26_ALNOKO |
		      MII_TG3_DSP_TAP26_RMRXSTO;
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	/* Turn LPI mode on. */
	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
2152
2153 static int tg3_wait_macro_done(struct tg3 *tp)
2154 {
2155         int limit = 100;
2156
2157         while (limit--) {
2158                 u32 tmp32;
2159
2160                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2161                         if ((tmp32 & 0x1000) == 0)
2162                                 break;
2163                 }
2164         }
2165         if (limit < 0)
2166                 return -EBUSY;
2167
2168         return 0;
2169 }
2170
/* Write a known test pattern to each of the four PHY DSP channels and
 * read it back to verify the DSP memory.  Sets *resetp when the PHY
 * should be reset and the whole sequence retried.  Returns 0 on
 * success or -EBUSY on any handshake or verification failure.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	/* Per-channel pattern: three (low, high) word pairs. */
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select this channel's block and start a write. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		/* Commit the write and wait for the macro to finish. */
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the block and start the read-back. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Compare each (low, high) pair against the pattern. */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: park the DSP in a safe state
				 * before failing (no retry requested).
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
2236
2237 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2238 {
2239         int chan;
2240
2241         for (chan = 0; chan < 4; chan++) {
2242                 int i;
2243
2244                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2245                              (chan * 0x2000) | 0x0200);
2246                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2247                 for (i = 0; i < 6; i++)
2248                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2249                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2250                 if (tg3_wait_macro_done(tp))
2251                         return -EBUSY;
2252         }
2253
2254         return 0;
2255 }
2256
2257 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2258 {
2259         u32 reg32, phy9_orig;
2260         int retries, do_phy_reset, err;
2261
2262         retries = 10;
2263         do_phy_reset = 1;
2264         do {
2265                 if (do_phy_reset) {
2266                         err = tg3_bmcr_reset(tp);
2267                         if (err)
2268                                 return err;
2269                         do_phy_reset = 0;
2270                 }
2271
2272                 /* Disable transmitter and interrupt.  */
2273                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2274                         continue;
2275
2276                 reg32 |= 0x3000;
2277                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2278
2279                 /* Set full-duplex, 1000 mbps.  */
2280                 tg3_writephy(tp, MII_BMCR,
2281                              BMCR_FULLDPLX | BMCR_SPEED1000);
2282
2283                 /* Set to master mode.  */
2284                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2285                         continue;
2286
2287                 tg3_writephy(tp, MII_CTRL1000,
2288                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2289
2290                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2291                 if (err)
2292                         return err;
2293
2294                 /* Block the PHY control access.  */
2295                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2296
2297                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2298                 if (!err)
2299                         break;
2300         } while (--retries);
2301
2302         err = tg3_phy_reset_chanpat(tp);
2303         if (err)
2304                 return err;
2305
2306         tg3_phydsp_write(tp, 0x8005, 0x0000);
2307
2308         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2309         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2310
2311         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2312
2313         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2314
2315         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2316                 reg32 &= ~0x3000;
2317                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2318         } else if (!err)
2319                 err = -EBUSY;
2320
2321         return err;
2322 }
2323
/* Reset the tigon3 PHY and re-apply all chip-specific workarounds:
 * OTP calibration, APD, ADC/BER/jitter erratum fixups, jumbo-frame
 * settings, auto-MDIX and wirespeed.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Take the ethernet PHY out of IDDQ (power-down) first. */
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* Read BMSR twice: the link-status bit is latched. */
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		/* These chips need the DSP test-pattern reset sequence. */
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		/* Temporarily lift the 10MB-RX-only restriction while
		 * the PHY resets; restored below.
		 */
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		/* Restore the saved CPMU control value. */
		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		/* Move the gigabit MAC clock off the 12.5MHz setting. */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	/* Per-PHY erratum workarounds, keyed off probe-time flags. */
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
2464
/* Inter-function GPIO handshake messages.  Each PCI function owns a
 * 4-bit field (shift = 4 * pci_fn, see tg3_set_function_status()) in
 * a shared status word; the *_ALL_* masks cover all four functions.
 */
#define TG3_GPIO_MSG_DRVR_PRES           0x00000001
#define TG3_GPIO_MSG_NEED_VAUX           0x00000002
#define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
                                          TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
        ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
         (TG3_GPIO_MSG_DRVR_PRES << 4) | \
         (TG3_GPIO_MSG_DRVR_PRES << 8) | \
         (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
        ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
         (TG3_GPIO_MSG_NEED_VAUX << 4) | \
         (TG3_GPIO_MSG_NEED_VAUX << 8) | \
         (TG3_GPIO_MSG_NEED_VAUX << 12))
2480
2481 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2482 {
2483         u32 status, shift;
2484
2485         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2486             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2487                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2488         else
2489                 status = tr32(TG3_CPMU_DRV_STATUS);
2490
2491         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2492         status &= ~(TG3_GPIO_MSG_MASK << shift);
2493         status |= (newstat << shift);
2494
2495         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2496             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2497                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2498         else
2499                 tw32(TG3_CPMU_DRV_STATUS, status);
2500
2501         return status >> TG3_APE_GPIO_MSG_SHIFT;
2502 }
2503
2504 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2505 {
2506         if (!tg3_flag(tp, IS_NIC))
2507                 return 0;
2508
2509         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2510             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2511             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2512                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2513                         return -EIO;
2514
2515                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2516
2517                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2518                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2519
2520                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2521         } else {
2522                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2523                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2524         }
2525
2526         return 0;
2527 }
2528
2529 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2530 {
2531         u32 grc_local_ctrl;
2532
2533         if (!tg3_flag(tp, IS_NIC) ||
2534             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2535             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2536                 return;
2537
2538         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2539
2540         tw32_wait_f(GRC_LOCAL_CTRL,
2541                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2542                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2543
2544         tw32_wait_f(GRC_LOCAL_CTRL,
2545                     grc_local_ctrl,
2546                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2547
2548         tw32_wait_f(GRC_LOCAL_CTRL,
2549                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2550                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2551 }
2552
/* Switch the board's power source to auxiliary (VAUX) power.  The GPIO
 * sequence that drives the power switch differs per chip family; each
 * branch below preserves the exact write ordering, with a settle delay
 * (TG3_GRC_LCLCTL_PWRSW_DELAY) after every write.  No-op for non-NIC
 * configurations.
 */
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* 5700/5701: single write, GPIO0/1 high with all three
		 * GPIOs configured as outputs.
		 */
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		/* Raise GPIO0 only after the other bits have settled. */
		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		if (!no_gpio2) {
			/* Drop GPIO2 again to finish the sequence. */
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
2629
2630 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2631 {
2632         u32 msg = 0;
2633
2634         /* Serialize power state transitions */
2635         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2636                 return;
2637
2638         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2639                 msg = TG3_GPIO_MSG_NEED_VAUX;
2640
2641         msg = tg3_set_function_status(tp, msg);
2642
2643         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2644                 goto done;
2645
2646         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2647                 tg3_pwrsrc_switch_to_vaux(tp);
2648         else
2649                 tg3_pwrsrc_die_with_vmain(tp);
2650
2651 done:
2652         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2653 }
2654
2655 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2656 {
2657         bool need_vaux = false;
2658
2659         /* The GPIOs do something completely different on 57765. */
2660         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2661                 return;
2662
2663         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2664             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2665             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2666                 tg3_frob_aux_power_5717(tp, include_wol ?
2667                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2668                 return;
2669         }
2670
2671         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2672                 struct net_device *dev_peer;
2673
2674                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2675
2676                 /* remove_one() may have been run on the peer. */
2677                 if (dev_peer) {
2678                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2679
2680                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2681                                 return;
2682
2683                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2684                             tg3_flag(tp_peer, ENABLE_ASF))
2685                                 need_vaux = true;
2686                 }
2687         }
2688
2689         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2690             tg3_flag(tp, ENABLE_ASF))
2691                 need_vaux = true;
2692
2693         if (need_vaux)
2694                 tg3_pwrsrc_switch_to_vaux(tp);
2695         else
2696                 tg3_pwrsrc_die_with_vmain(tp);
2697 }
2698
2699 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2700 {
2701         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2702                 return 1;
2703         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2704                 if (speed != SPEED_10)
2705                         return 1;
2706         } else if (speed == SPEED_10)
2707                 return 1;
2708
2709         return 0;
2710 }
2711
/* Prepare the PHY for a low-power state.  'do_low_power' selects the
 * extra LED/aux-control setup on standard copper PHYs.  Several chip
 * revisions must skip the final BMCR power-down because of hardware
 * bugs; serdes, 5906 and FET PHYs each take their own early-return
 * path.
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906: reset the PHY, then put the internal ePHY into
		 * IDDQ instead of a BMCR power-down.
		 */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			/* Enable shadow register access, set the SBPD
			 * bit in AUXMODE4 (presumably standby power
			 * down -- confirm against PHY docs), then
			 * restore the test register.
			 */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		/* 5784/5761 AX: drop the 1000MB MAC clock to 12.5 MHz
		 * before powering the PHY down.
		 */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
2784
2785 /* tp->lock is held. */
2786 static int tg3_nvram_lock(struct tg3 *tp)
2787 {
2788         if (tg3_flag(tp, NVRAM)) {
2789                 int i;
2790
2791                 if (tp->nvram_lock_cnt == 0) {
2792                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2793                         for (i = 0; i < 8000; i++) {
2794                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2795                                         break;
2796                                 udelay(20);
2797                         }
2798                         if (i == 8000) {
2799                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2800                                 return -ENODEV;
2801                         }
2802                 }
2803                 tp->nvram_lock_cnt++;
2804         }
2805         return 0;
2806 }
2807
2808 /* tp->lock is held. */
2809 static void tg3_nvram_unlock(struct tg3 *tp)
2810 {
2811         if (tg3_flag(tp, NVRAM)) {
2812                 if (tp->nvram_lock_cnt > 0)
2813                         tp->nvram_lock_cnt--;
2814                 if (tp->nvram_lock_cnt == 0)
2815                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2816         }
2817 }
2818
2819 /* tp->lock is held. */
2820 static void tg3_enable_nvram_access(struct tg3 *tp)
2821 {
2822         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2823                 u32 nvaccess = tr32(NVRAM_ACCESS);
2824
2825                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2826         }
2827 }
2828
2829 /* tp->lock is held. */
2830 static void tg3_disable_nvram_access(struct tg3 *tp)
2831 {
2832         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2833                 u32 nvaccess = tr32(NVRAM_ACCESS);
2834
2835                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2836         }
2837 }
2838
/* Read one 32-bit word from the legacy SEEPROM interface.  'offset'
 * must be dword aligned and within EEPROM_ADDR_ADDR_MASK.  Returns 0
 * with the word stored in *val, -EINVAL on a bad offset, or -EBUSY if
 * the controller never signals completion.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	/* Preserve unrelated bits; clear the address, device-id and
	 * direction fields before programming the read request.
	 */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll for completion, up to ~1 second. */
	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
2878
2879 #define NVRAM_CMD_TIMEOUT 10000
2880
2881 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2882 {
2883         int i;
2884
2885         tw32(NVRAM_CMD, nvram_cmd);
2886         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2887                 udelay(10);
2888                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2889                         udelay(10);
2890                         break;
2891                 }
2892         }
2893
2894         if (i == NVRAM_CMD_TIMEOUT)
2895                 return -EBUSY;
2896
2897         return 0;
2898 }
2899
2900 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2901 {
2902         if (tg3_flag(tp, NVRAM) &&
2903             tg3_flag(tp, NVRAM_BUFFERED) &&
2904             tg3_flag(tp, FLASH) &&
2905             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2906             (tp->nvram_jedecnum == JEDEC_ATMEL))
2907
2908                 addr = ((addr / tp->nvram_pagesize) <<
2909                         ATMEL_AT45DB0X1B_PAGE_POS) +
2910                        (addr % tp->nvram_pagesize);
2911
2912         return addr;
2913 }
2914
2915 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2916 {
2917         if (tg3_flag(tp, NVRAM) &&
2918             tg3_flag(tp, NVRAM_BUFFERED) &&
2919             tg3_flag(tp, FLASH) &&
2920             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2921             (tp->nvram_jedecnum == JEDEC_ATMEL))
2922
2923                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2924                         tp->nvram_pagesize) +
2925                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2926
2927         return addr;
2928 }
2929
2930 /* NOTE: Data read in from NVRAM is byteswapped according to
2931  * the byteswapping settings for all other register accesses.
2932  * tg3 devices are BE devices, so on a BE machine, the data
2933  * returned will be exactly as it is seen in NVRAM.  On a LE
2934  * machine, the 32-bit value will be byteswapped.
2935  */
2936 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2937 {
2938         int ret;
2939
2940         if (!tg3_flag(tp, NVRAM))
2941                 return tg3_nvram_read_using_eeprom(tp, offset, val);
2942
2943         offset = tg3_nvram_phys_addr(tp, offset);
2944
2945         if (offset > NVRAM_ADDR_MSK)
2946                 return -EINVAL;
2947
2948         ret = tg3_nvram_lock(tp);
2949         if (ret)
2950                 return ret;
2951
2952         tg3_enable_nvram_access(tp);
2953
2954         tw32(NVRAM_ADDR, offset);
2955         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2956                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2957
2958         if (ret == 0)
2959                 *val = tr32(NVRAM_RDDATA);
2960
2961         tg3_disable_nvram_access(tp);
2962
2963         tg3_nvram_unlock(tp);
2964
2965         return ret;
2966 }
2967
2968 /* Ensures NVRAM data is in bytestream format. */
2969 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2970 {
2971         u32 v;
2972         int res = tg3_nvram_read(tp, offset, &v);
2973         if (!res)
2974                 *val = cpu_to_be32(v);
2975         return res;
2976 }
2977
/* Write 'len' bytes from 'buf' through the legacy SEEPROM interface,
 * one 32-bit word at a time.  Returns 0 on success or -EBUSY if any
 * word write never completes.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		/* Write COMPLETE back first -- apparently write-one-to-
		 * clear any stale completion status; confirm against
		 * register docs.
		 */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll for completion, up to ~1 second per word. */
		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
3026
3027 /* offset and length are dword aligned */
3028 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3029                 u8 *buf)
3030 {
3031         int ret = 0;
3032         u32 pagesize = tp->nvram_pagesize;
3033         u32 pagemask = pagesize - 1;
3034         u32 nvram_cmd;
3035         u8 *tmp;
3036
3037         tmp = kmalloc(pagesize, GFP_KERNEL);
3038         if (tmp == NULL)
3039                 return -ENOMEM;
3040
3041         while (len) {
3042                 int j;
3043                 u32 phy_addr, page_off, size;
3044
3045                 phy_addr = offset & ~pagemask;
3046
3047                 for (j = 0; j < pagesize; j += 4) {
3048                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3049                                                   (__be32 *) (tmp + j));
3050                         if (ret)
3051                                 break;
3052                 }
3053                 if (ret)
3054                         break;
3055
3056                 page_off = offset & pagemask;
3057                 size = pagesize;
3058                 if (len < size)
3059                         size = len;
3060
3061                 len -= size;
3062
3063                 memcpy(tmp + page_off, buf, size);
3064
3065                 offset = offset + (pagesize - page_off);
3066
3067                 tg3_enable_nvram_access(tp);
3068
3069                 /*
3070                  * Before we can erase the flash page, we need
3071                  * to issue a special "write enable" command.
3072                  */
3073                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3074
3075                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3076                         break;
3077
3078                 /* Erase the target page */
3079                 tw32(NVRAM_ADDR, phy_addr);
3080
3081                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3082                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3083
3084                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3085                         break;
3086
3087                 /* Issue another write enable to start the write. */
3088                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3089
3090                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3091                         break;
3092
3093                 for (j = 0; j < pagesize; j += 4) {
3094                         __be32 data;
3095
3096                         data = *((__be32 *) (tmp + j));
3097
3098                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3099
3100                         tw32(NVRAM_ADDR, phy_addr + j);
3101
3102                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3103                                 NVRAM_CMD_WR;
3104
3105                         if (j == 0)
3106                                 nvram_cmd |= NVRAM_CMD_FIRST;
3107                         else if (j == (pagesize - 4))
3108                                 nvram_cmd |= NVRAM_CMD_LAST;
3109
3110                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3111                         if (ret)
3112                                 break;
3113                 }
3114                 if (ret)
3115                         break;
3116         }
3117
3118         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3119         tg3_nvram_exec_cmd(tp, nvram_cmd);
3120
3121         kfree(tmp);
3122
3123         return ret;
3124 }
3125
/* offset and length are dword aligned.
 * Write to buffered flash (or plain EEPROM) one 32-bit word at a
 * time.  Returns 0 on success or a negative errno from the first
 * failed command.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		/* Convert to the device's physical addressing scheme. */
		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* Mark page boundaries and the end of the transfer. */
		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
		    !tg3_flag(tp, 5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {
			u32 cmd;

			/* ST parts on these chips need an explicit
			 * write-enable before each FIRST command.
			 */
			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
			ret = tg3_nvram_exec_cmd(tp, cmd);
			if (ret)
				break;
		}
		if (!tg3_flag(tp, FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
		if (ret)
			break;
	}
	return ret;
}
3177
3178 /* offset and length are dword aligned */
3179 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3180 {
3181         int ret;
3182
3183         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3184                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3185                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3186                 udelay(40);
3187         }
3188
3189         if (!tg3_flag(tp, NVRAM)) {
3190                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3191         } else {
3192                 u32 grc_mode;
3193
3194                 ret = tg3_nvram_lock(tp);
3195                 if (ret)
3196                         return ret;
3197
3198                 tg3_enable_nvram_access(tp);
3199                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3200                         tw32(NVRAM_WRITE1, 0x406);
3201
3202                 grc_mode = tr32(GRC_MODE);
3203                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3204
3205                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3206                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3207                                 buf);
3208                 } else {
3209                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3210                                 buf);
3211                 }
3212
3213                 grc_mode = tr32(GRC_MODE);
3214                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3215
3216                 tg3_disable_nvram_access(tp);
3217                 tg3_nvram_unlock(tp);
3218         }
3219
3220         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3221                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3222                 udelay(40);
3223         }
3224
3225         return ret;
3226 }
3227
3228 #define RX_CPU_SCRATCH_BASE     0x30000
3229 #define RX_CPU_SCRATCH_SIZE     0x04000
3230 #define TX_CPU_SCRATCH_BASE     0x34000
3231 #define TX_CPU_SCRATCH_SIZE     0x04000
3232
/* tp->lock is held.
 * Halt the RX or TX on-chip CPU ('offset' is RX_CPU_BASE or
 * TX_CPU_BASE).  Returns 0 on success, -ENODEV if the CPU never
 * reports the halted state.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
	int i;

	/* 5705+ parts must not be asked to halt a TX CPU (see also
	 * tg3_load_firmware_cpu()).
	 */
	BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906 halts its VCPU through a dedicated control bit. */
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (offset == RX_CPU_BASE) {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}

		/* RX CPU gets one extra flushed halt write and a short
		 * settle delay after the poll loop.
		 */
		tw32(offset + CPU_STATE, 0xffffffff);
		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
		udelay(10);
	} else {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
	}

	if (i >= 10000) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, offset == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
3277
/* Describes a firmware image to be copied into an on-chip CPU's
 * scratch memory (see tg3_load_firmware_cpu()).
 */
struct fw_info {
	unsigned int fw_base;	/* image start address; low 16 bits are
				 * used as the scratch-memory offset */
	unsigned int fw_len;	/* image length in bytes */
	const __be32 *fw_data;	/* image words, big-endian */
};
3283
/* tp->lock is held.
 * Halt a chip CPU and copy a firmware image into its scratch memory,
 * leaving the CPU halted.  Returns 0 on success or a negative errno.
 */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 struct fw_info *info)
{
	int err, lock_err, i;
	void (*write_op)(struct tg3 *, u32, u32);

	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	/* 5705+ parts use direct memory writes; older parts go through
	 * the indirect register interface.
	 */
	if (tg3_flag(tp, 5705_PLUS))
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	/* It is possible that bootcode is still loading at this point.
	 * Get the nvram lock first before halting the cpu.
	 */
	lock_err = tg3_nvram_lock(tp);
	err = tg3_halt_cpu(tp, cpu_base);
	if (!lock_err)
		tg3_nvram_unlock(tp);
	if (err)
		goto out;

	/* Zero the scratch area, re-assert halt, then copy the image
	 * in word by word.  Only the low 16 bits of fw_base contribute
	 * to the scratch offset.
	 */
	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
		write_op(tp, cpu_scratch_base + i, 0);
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->fw_base & 0xffff) +
			      (i * sizeof(u32))),
			      be32_to_cpu(info->fw_data[i]));

	err = 0;

out:
	return err;
}
3329
/* tp->lock is held.
 *
 * Load the 5701 A0 workaround firmware into both internal CPUs and
 * start only the RX CPU.  Returns 0 on success or a negative errno.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
        struct fw_info info;
        const __be32 *fw_data;
        int err, i;

        fw_data = (void *)tp->fw->data;

        /* Firmware blob starts with version numbers, followed by
           start address and length. We are setting complete length.
           length = end_address_of_bss - start_address_of_text.
           Remainder is the blob to be loaded contiguously
           from start address. */

        info.fw_base = be32_to_cpu(fw_data[1]);
        info.fw_len = tp->fw->size - 12;
        info.fw_data = &fw_data[3];

        /* The same image is loaded into both the RX and TX CPUs. */
        err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
                                    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
                                    &info);
        if (err)
                return err;

        err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
                                    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
                                    &info);
        if (err)
                return err;

        /* Now startup only the RX cpu. */
        tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
        tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);

        /* Verify the PC latched the firmware entry point; retry up to 5x. */
        for (i = 0; i < 5; i++) {
                if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
                        break;
                tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
                tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
                tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
                udelay(1000);
        }
        if (i >= 5) {
                netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
                           "should be %08x\n", __func__,
                           tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
                return -ENODEV;
        }
        /* Clear the halt bit to let the RX CPU run. */
        tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
        tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);

        return 0;
}
3384
/* tp->lock is held.
 *
 * Load the software-TSO firmware into the appropriate internal CPU and
 * start it.  A no-op (returns 0) on chips with hardware TSO support.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
        struct fw_info info;
        const __be32 *fw_data;
        unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
        int err, i;

        /* Hardware-TSO chips don't run this firmware at all. */
        if (tg3_flag(tp, HW_TSO_1) ||
            tg3_flag(tp, HW_TSO_2) ||
            tg3_flag(tp, HW_TSO_3))
                return 0;

        fw_data = (void *)tp->fw->data;

        /* Firmware blob starts with version numbers, followed by
           start address and length. We are setting complete length.
           length = end_address_of_bss - start_address_of_text.
           Remainder is the blob to be loaded contiguously
           from start address. */

        info.fw_base = be32_to_cpu(fw_data[1]);
        cpu_scratch_size = tp->fw_len;
        info.fw_len = tp->fw->size - 12;
        info.fw_data = &fw_data[3];

        /* 5705 runs TSO firmware on the RX CPU out of mbuf-pool SRAM;
         * all other chips use the TX CPU scratch area.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                cpu_base = RX_CPU_BASE;
                cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
        } else {
                cpu_base = TX_CPU_BASE;
                cpu_scratch_base = TX_CPU_SCRATCH_BASE;
                cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
        }

        err = tg3_load_firmware_cpu(tp, cpu_base,
                                    cpu_scratch_base, cpu_scratch_size,
                                    &info);
        if (err)
                return err;

        /* Now startup the cpu. */
        tw32(cpu_base + CPU_STATE, 0xffffffff);
        tw32_f(cpu_base + CPU_PC, info.fw_base);

        /* Verify the PC latched the firmware entry point; retry up to 5x. */
        for (i = 0; i < 5; i++) {
                if (tr32(cpu_base + CPU_PC) == info.fw_base)
                        break;
                tw32(cpu_base + CPU_STATE, 0xffffffff);
                tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
                tw32_f(cpu_base + CPU_PC, info.fw_base);
                udelay(1000);
        }
        if (i >= 5) {
                netdev_err(tp->dev,
                           "%s fails to set CPU PC, is %08x should be %08x\n",
                           __func__, tr32(cpu_base + CPU_PC), info.fw_base);
                return -ENODEV;
        }
        /* Clear the halt bit to let the CPU run. */
        tw32(cpu_base + CPU_STATE, 0xffffffff);
        tw32_f(cpu_base + CPU_MODE,  0x00000000);
        return 0;
}
3448
3449
3450 /* tp->lock is held. */
3451 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3452 {
3453         u32 addr_high, addr_low;
3454         int i;
3455
3456         addr_high = ((tp->dev->dev_addr[0] << 8) |
3457                      tp->dev->dev_addr[1]);
3458         addr_low = ((tp->dev->dev_addr[2] << 24) |
3459                     (tp->dev->dev_addr[3] << 16) |
3460                     (tp->dev->dev_addr[4] <<  8) |
3461                     (tp->dev->dev_addr[5] <<  0));
3462         for (i = 0; i < 4; i++) {
3463                 if (i == 1 && skip_mac_1)
3464                         continue;
3465                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3466                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3467         }
3468
3469         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3470             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3471                 for (i = 0; i < 12; i++) {
3472                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3473                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3474                 }
3475         }
3476
3477         addr_high = (tp->dev->dev_addr[0] +
3478                      tp->dev->dev_addr[1] +
3479                      tp->dev->dev_addr[2] +
3480                      tp->dev->dev_addr[3] +
3481                      tp->dev->dev_addr[4] +
3482                      tp->dev->dev_addr[5]) &
3483                 TX_BACKOFF_SEED_MASK;
3484         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3485 }
3486
static void tg3_enable_register_access(struct tg3 *tp)
{
        /*
         * Make sure register accesses (indirect or otherwise) will function
         * correctly.
         *
         * Rewrites the driver's cached MISC_HOST_CTRL value into PCI
         * config space, e.g. after a power-state transition may have
         * clobbered it.
         */
        pci_write_config_dword(tp->pdev,
                               TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
3496
3497 static int tg3_power_up(struct tg3 *tp)
3498 {
3499         int err;
3500
3501         tg3_enable_register_access(tp);
3502
3503         err = pci_set_power_state(tp->pdev, PCI_D0);
3504         if (!err) {
3505                 /* Switch out of Vaux if it is a NIC */
3506                 tg3_pwrsrc_switch_to_vmain(tp);
3507         } else {
3508                 netdev_err(tp->dev, "Transition to D0 failed\n");
3509         }
3510
3511         return err;
3512 }
3513
3514 static int tg3_setup_phy(struct tg3 *, int);
3515
/* Prepare the chip for a low-power state: quiesce the PHY (saving the
 * current link settings for resume), arm WoL if requested, gate clocks,
 * and signal the shutdown to firmware.  Always returns 0.
 */
static int tg3_power_down_prepare(struct tg3 *tp)
{
        u32 misc_host_ctrl;
        bool device_should_wake, do_low_power;

        tg3_enable_register_access(tp);

        /* Restore the CLKREQ setting. */
        if (tg3_flag(tp, CLKREQ_BUG)) {
                u16 lnkctl;

                pci_read_config_word(tp->pdev,
                                     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
                                     &lnkctl);
                lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
                pci_write_config_word(tp->pdev,
                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
                                      lnkctl);
        }

        /* Mask PCI interrupts while the device is being powered down. */
        misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
        tw32(TG3PCI_MISC_HOST_CTRL,
             misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

        device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
                             tg3_flag(tp, WOL_ENABLE);

        if (tg3_flag(tp, USE_PHYLIB)) {
                do_low_power = false;
                if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
                    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
                        struct phy_device *phydev;
                        u32 phyid, advertising;

                        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

                        tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

                        /* Save the active link settings so resume can
                         * restore them.
                         */
                        tp->link_config.orig_speed = phydev->speed;
                        tp->link_config.orig_duplex = phydev->duplex;
                        tp->link_config.orig_autoneg = phydev->autoneg;
                        tp->link_config.orig_advertising = phydev->advertising;

                        /* Advertise only low-speed modes (100Mb as well if
                         * WoL must work at 100Mb) to cut power.
                         */
                        advertising = ADVERTISED_TP |
                                      ADVERTISED_Pause |
                                      ADVERTISED_Autoneg |
                                      ADVERTISED_10baseT_Half;

                        if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
                                if (tg3_flag(tp, WOL_SPEED_100MB))
                                        advertising |=
                                                ADVERTISED_100baseT_Half |
                                                ADVERTISED_100baseT_Full |
                                                ADVERTISED_10baseT_Full;
                                else
                                        advertising |= ADVERTISED_10baseT_Full;
                        }

                        phydev->advertising = advertising;

                        phy_start_aneg(phydev);

                        /* Certain Broadcom PHY families need driver-managed
                         * low-power handling below.
                         */
                        phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
                        if (phyid != PHY_ID_BCMAC131) {
                                phyid &= PHY_BCM_OUI_MASK;
                                if (phyid == PHY_BCM_OUI_1 ||
                                    phyid == PHY_BCM_OUI_2 ||
                                    phyid == PHY_BCM_OUI_3)
                                        do_low_power = true;
                        }
                }
        } else {
                do_low_power = true;

                /* Non-phylib path: save link settings once, then drop to
                 * 10Mb half duplex autoneg on copper devices.
                 */
                if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
                        tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
                        tp->link_config.orig_speed = tp->link_config.speed;
                        tp->link_config.orig_duplex = tp->link_config.duplex;
                        tp->link_config.orig_autoneg = tp->link_config.autoneg;
                }

                if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
                        tp->link_config.speed = SPEED_10;
                        tp->link_config.duplex = DUPLEX_HALF;
                        tp->link_config.autoneg = AUTONEG_ENABLE;
                        tg3_setup_phy(tp, 0);
                }
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                u32 val;

                val = tr32(GRC_VCPU_EXT_CTRL);
                tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
        } else if (!tg3_flag(tp, ENABLE_ASF)) {
                int i;
                u32 val;

                /* Poll (up to ~200ms) for the firmware status mailbox to
                 * reach the expected magic value.
                 */
                for (i = 0; i < 200; i++) {
                        tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
                        if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
                                break;
                        msleep(1);
                }
        }
        if (tg3_flag(tp, WOL_CAP))
                tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
                                                     WOL_DRV_STATE_SHUTDOWN |
                                                     WOL_DRV_WOL |
                                                     WOL_SET_MAGIC_PKT);

        if (device_should_wake) {
                u32 mac_mode;

                if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
                        if (do_low_power &&
                            !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
                                /* Enable WoL power saving in the PHY. */
                                tg3_phy_auxctl_write(tp,
                                               MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
                                               MII_TG3_AUXCTL_PCTL_WOL_EN |
                                               MII_TG3_AUXCTL_PCTL_100TX_LPWR |
                                               MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
                                udelay(40);
                        }

                        if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
                                mac_mode = MAC_MODE_PORT_MODE_GMII;
                        else
                                mac_mode = MAC_MODE_PORT_MODE_MII;

                        mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
                            ASIC_REV_5700) {
                                u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
                                             SPEED_100 : SPEED_10;
                                if (tg3_5700_link_polarity(tp, speed))
                                        mac_mode |= MAC_MODE_LINK_POLARITY;
                                else
                                        mac_mode &= ~MAC_MODE_LINK_POLARITY;
                        }
                } else {
                        mac_mode = MAC_MODE_PORT_MODE_TBI;
                }

                if (!tg3_flag(tp, 5750_PLUS))
                        tw32(MAC_LED_CTRL, tp->led_ctrl);

                /* Leave magic-packet reception enabled so WoL works. */
                mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
                if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
                    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
                        mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

                if (tg3_flag(tp, ENABLE_APE))
                        mac_mode |= MAC_MODE_APE_TX_EN |
                                    MAC_MODE_APE_RX_EN |
                                    MAC_MODE_TDE_ENABLE;

                tw32_f(MAC_MODE, mac_mode);
                udelay(100);

                tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
                udelay(10);
        }

        /* Gate/slow internal clocks where the chip family allows it. */
        if (!tg3_flag(tp, WOL_SPEED_100MB) &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
                u32 base_val;

                base_val = tp->pci_clock_ctrl;
                base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
                             CLOCK_CTRL_TXCLK_DISABLE);

                tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
                            CLOCK_CTRL_PWRDOWN_PLL133, 40);
        } else if (tg3_flag(tp, 5780_CLASS) ||
                   tg3_flag(tp, CPMU_PRESENT) ||
                   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                /* do nothing */
        } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
                u32 newbits1, newbits2;

                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                        newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
                                    CLOCK_CTRL_TXCLK_DISABLE |
                                    CLOCK_CTRL_ALTCLK);
                        newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
                } else if (tg3_flag(tp, 5705_PLUS)) {
                        newbits1 = CLOCK_CTRL_625_CORE;
                        newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
                } else {
                        newbits1 = CLOCK_CTRL_ALTCLK;
                        newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
                }

                /* Apply the clock changes in two steps, 40us apart. */
                tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
                            40);

                tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
                            40);

                if (!tg3_flag(tp, 5705_PLUS)) {
                        u32 newbits3;

                        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                                newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
                                            CLOCK_CTRL_TXCLK_DISABLE |
                                            CLOCK_CTRL_44MHZ_CORE);
                        } else {
                                newbits3 = CLOCK_CTRL_44MHZ_CORE;
                        }

                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    tp->pci_clock_ctrl | newbits3, 40);
                }
        }

        /* Only power the PHY fully down when nothing needs it for wake. */
        if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
                tg3_power_down_phy(tp, do_low_power);

        tg3_frob_aux_power(tp, true);

        /* Workaround for unstable PLL clock */
        if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
            (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
                u32 val = tr32(0x7d00);

                val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
                tw32(0x7d00, val);
                if (!tg3_flag(tp, ENABLE_ASF)) {
                        int err;

                        /* Take the nvram lock before halting the RX CPU,
                         * same ordering as tg3_load_firmware_cpu().
                         */
                        err = tg3_nvram_lock(tp);
                        tg3_halt_cpu(tp, RX_CPU_BASE);
                        if (!err)
                                tg3_nvram_unlock(tp);
                }
        }

        tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

        return 0;
}
3761
/* Fully power the device down: run the shutdown sequence, arm PME for
 * wake-on-LAN if enabled, then place the device in D3hot.
 */
static void tg3_power_down(struct tg3 *tp)
{
        tg3_power_down_prepare(tp);

        pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
        pci_set_power_state(tp->pdev, PCI_D3hot);
}
3769
3770 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3771 {
3772         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3773         case MII_TG3_AUX_STAT_10HALF:
3774                 *speed = SPEED_10;
3775                 *duplex = DUPLEX_HALF;
3776                 break;
3777
3778         case MII_TG3_AUX_STAT_10FULL:
3779                 *speed = SPEED_10;
3780                 *duplex = DUPLEX_FULL;
3781                 break;
3782
3783         case MII_TG3_AUX_STAT_100HALF:
3784                 *speed = SPEED_100;
3785                 *duplex = DUPLEX_HALF;
3786                 break;
3787
3788         case MII_TG3_AUX_STAT_100FULL:
3789                 *speed = SPEED_100;
3790                 *duplex = DUPLEX_FULL;
3791                 break;
3792
3793         case MII_TG3_AUX_STAT_1000HALF:
3794                 *speed = SPEED_1000;
3795                 *duplex = DUPLEX_HALF;
3796                 break;
3797
3798         case MII_TG3_AUX_STAT_1000FULL:
3799                 *speed = SPEED_1000;
3800                 *duplex = DUPLEX_FULL;
3801                 break;
3802
3803         default:
3804                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3805                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3806                                  SPEED_10;
3807                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3808                                   DUPLEX_HALF;
3809                         break;
3810                 }
3811                 *speed = SPEED_INVALID;
3812                 *duplex = DUPLEX_INVALID;
3813                 break;
3814         }
3815 }
3816
/* Program autoneg advertisements into the PHY: 10/100 (MII_ADVERTISE),
 * gigabit (MII_CTRL1000) where supported, and EEE on EEE-capable
 * devices.  @advertise and @flowctrl are ethtool-style masks.
 * Returns 0 or the error from the first failing PHY write.
 */
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
        int err = 0;
        u32 val, new_adv;

        new_adv = ADVERTISE_CSMA;
        new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
        new_adv |= mii_advertise_flowctrl(flowctrl);

        err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
        if (err)
                goto done;

        if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
                new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);

                /* Early 5701 revisions force 1000Base-T master mode. */
                if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
                        new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

                err = tg3_writephy(tp, MII_CTRL1000, new_adv);
                if (err)
                        goto done;
        }

        /* Everything below only applies to EEE-capable devices. */
        if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
                goto done;

        /* Disable LPI while reprogramming the EEE advertisement. */
        tw32(TG3_CPMU_EEE_MODE,
             tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

        err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
        if (!err) {
                u32 err2;

                val = 0;
                /* Advertise 100-BaseTX EEE ability */
                if (advertise & ADVERTISED_100baseT_Full)
                        val |= MDIO_AN_EEE_ADV_100TX;
                /* Advertise 1000-BaseT EEE ability */
                if (advertise & ADVERTISED_1000baseT_Full)
                        val |= MDIO_AN_EEE_ADV_1000T;
                err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
                if (err)
                        val = 0;

                switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
                case ASIC_REV_5717:
                case ASIC_REV_57765:
                case ASIC_REV_57766:
                case ASIC_REV_5719:
                        /* If we advertised any eee advertisements above... */
                        if (val)
                                val = MII_TG3_DSP_TAP26_ALNOKO |
                                      MII_TG3_DSP_TAP26_RMRXSTO |
                                      MII_TG3_DSP_TAP26_OPCSINPT;
                        tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
                        /* Fall through */
                case ASIC_REV_5720:
                        if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
                                tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
                                                 MII_TG3_DSP_CH34TP2_HIBW01);
                }

                /* Always try to turn SMDSP back off; keep the first error. */
                err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
                if (!err)
                        err = err2;
        }

done:
        return err;
}
3889
/* Begin bringing up the copper PHY: either configure autoneg
 * advertisements or force the exact speed/duplex the caller configured
 * in tp->link_config.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
        u32 new_adv;
        int i;

        if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
                /* Low-power mode: advertise only 10Mb, plus 100Mb when
                 * WoL must work at 100Mb.
                 */
                new_adv = ADVERTISED_10baseT_Half |
                          ADVERTISED_10baseT_Full;
                if (tg3_flag(tp, WOL_SPEED_100MB))
                        new_adv |= ADVERTISED_100baseT_Half |
                                   ADVERTISED_100baseT_Full;

                tg3_phy_autoneg_cfg(tp, new_adv,
                                    FLOW_CTRL_TX | FLOW_CTRL_RX);
        } else if (tp->link_config.speed == SPEED_INVALID) {
                /* No forced speed: advertise the configured mask, minus
                 * gigabit modes on 10/100-only devices.
                 */
                if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
                        tp->link_config.advertising &=
                                ~(ADVERTISED_1000baseT_Half |
                                  ADVERTISED_1000baseT_Full);

                tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
                                    tp->link_config.flowctrl);
        } else {
                /* Asking for a specific link mode. */
                if (tp->link_config.speed == SPEED_1000) {
                        if (tp->link_config.duplex == DUPLEX_FULL)
                                new_adv = ADVERTISED_1000baseT_Full;
                        else
                                new_adv = ADVERTISED_1000baseT_Half;
                } else if (tp->link_config.speed == SPEED_100) {
                        if (tp->link_config.duplex == DUPLEX_FULL)
                                new_adv = ADVERTISED_100baseT_Full;
                        else
                                new_adv = ADVERTISED_100baseT_Half;
                } else {
                        if (tp->link_config.duplex == DUPLEX_FULL)
                                new_adv = ADVERTISED_10baseT_Full;
                        else
                                new_adv = ADVERTISED_10baseT_Half;
                }

                tg3_phy_autoneg_cfg(tp, new_adv,
                                    tp->link_config.flowctrl);
        }

        if (tp->link_config.autoneg == AUTONEG_DISABLE &&
            tp->link_config.speed != SPEED_INVALID) {
                u32 bmcr, orig_bmcr;

                tp->link_config.active_speed = tp->link_config.speed;
                tp->link_config.active_duplex = tp->link_config.duplex;

                /* Translate the forced speed/duplex into BMCR bits. */
                bmcr = 0;
                switch (tp->link_config.speed) {
                default:
                case SPEED_10:
                        break;

                case SPEED_100:
                        bmcr |= BMCR_SPEED100;
                        break;

                case SPEED_1000:
                        bmcr |= BMCR_SPEED1000;
                        break;
                }

                if (tp->link_config.duplex == DUPLEX_FULL)
                        bmcr |= BMCR_FULLDPLX;

                if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
                    (bmcr != orig_bmcr)) {
                        /* Drop the link by putting the PHY in loopback, then
                         * wait (up to ~15ms) for BMSR to report link-down
                         * before programming the new mode.
                         */
                        tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
                        for (i = 0; i < 1500; i++) {
                                u32 tmp;

                                udelay(10);
                                if (tg3_readphy(tp, MII_BMSR, &tmp) ||
                                    tg3_readphy(tp, MII_BMSR, &tmp))
                                        continue;
                                if (!(tmp & BMSR_LSTATUS)) {
                                        udelay(40);
                                        break;
                                }
                        }
                        tg3_writephy(tp, MII_BMCR, bmcr);
                        udelay(40);
                }
        } else {
                tg3_writephy(tp, MII_BMCR,
                             BMCR_ANENABLE | BMCR_ANRESTART);
        }
}
3983
3984 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3985 {
3986         int err;
3987
3988         /* Turn off tap power management. */
3989         /* Set Extended packet length bit */
3990         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3991
3992         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3993         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3994         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3995         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3996         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3997
3998         udelay(40);
3999
4000         return err;
4001 }
4002
4003 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4004 {
4005         u32 advmsk, tgtadv, advertising;
4006
4007         advertising = tp->link_config.advertising;
4008         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4009
4010         advmsk = ADVERTISE_ALL;
4011         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4012                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4013                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4014         }
4015
4016         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4017                 return false;
4018
4019         if ((*lcladv & advmsk) != tgtadv)
4020                 return false;
4021
4022         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4023                 u32 tg3_ctrl;
4024
4025                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4026
4027                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4028                         return false;
4029
4030                 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4031                 if (tg3_ctrl != tgtadv)
4032                         return false;
4033         }
4034
4035         return true;
4036 }
4037
4038 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4039 {
4040         u32 lpeth = 0;
4041
4042         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4043                 u32 val;
4044
4045                 if (tg3_readphy(tp, MII_STAT1000, &val))
4046                         return false;
4047
4048                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4049         }
4050
4051         if (tg3_readphy(tp, MII_LPA, rmtadv))
4052                 return false;
4053
4054         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4055         tp->link_config.rmt_adv = lpeth;
4056
4057         return true;
4058 }
4059
/* Bring up (or renegotiate) the link on copper-PHY devices.
 *
 * Acks latched MAC status bits, optionally resets the PHY, polls link
 * status, determines speed/duplex, programs the MAC port mode to match,
 * and updates the netdev carrier state.  The PHY register sequences and
 * udelay()s below are order-sensitive workarounds for specific chip
 * revisions — do not reorder.
 *
 * @tp:          device state
 * @force_reset: nonzero to force a PHY reset before negotiation
 *
 * Returns 0 on success, or a negative error from the 5401 DSP init path.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
        int current_link_up;
        u32 bmsr, val;
        u32 lcl_adv, rmt_adv;
        u16 current_speed;
        u8 current_duplex;
        int i, err;

        /* Mask link events while we reconfigure, and ack any latched
         * status-change bits.
         */
        tw32(MAC_EVENT, 0);

        tw32_f(MAC_STATUS,
             (MAC_STATUS_SYNC_CHANGED |
              MAC_STATUS_CFG_CHANGED |
              MAC_STATUS_MI_COMPLETION |
              MAC_STATUS_LNKSTATE_CHANGED));
        udelay(40);

        /* Turn off MI auto-polling so our manual PHY accesses below
         * don't race the hardware poller.
         */
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

        /* Some third-party PHYs need to be reset on link going
         * down.
         */
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
            netif_carrier_ok(tp->dev)) {
                /* BMSR link status is latched-low; read twice so the
                 * second read reflects the current state.
                 */
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                    !(bmsr & BMSR_LSTATUS))
                        force_reset = 1;
        }
        if (force_reset)
                tg3_phy_reset(tp);

        if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
                /* Double read: first clears the latched BMSR bits. */
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
                    !tg3_flag(tp, INIT_COMPLETE))
                        bmsr = 0;

                if (!(bmsr & BMSR_LSTATUS)) {
                        err = tg3_init_5401phy_dsp(tp);
                        if (err)
                                return err;

                        /* Wait up to ~10ms for link to come back. */
                        tg3_readphy(tp, MII_BMSR, &bmsr);
                        for (i = 0; i < 1000; i++) {
                                udelay(10);
                                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                                    (bmsr & BMSR_LSTATUS)) {
                                        udelay(40);
                                        break;
                                }
                        }

                        /* 5401 rev B0 at gigabit without link: reset and
                         * redo the DSP init.
                         */
                        if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
                            TG3_PHY_REV_BCM5401_B0 &&
                            !(bmsr & BMSR_LSTATUS) &&
                            tp->link_config.active_speed == SPEED_1000) {
                                err = tg3_phy_reset(tp);
                                if (!err)
                                        err = tg3_init_5401phy_dsp(tp);
                                if (err)
                                        return err;
                        }
                }
        } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
                   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
                /* 5701 {A0,B0} CRC bug workaround */
                tg3_writephy(tp, 0x15, 0x0a75);
                tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
                tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
                tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
        }

        /* Clear pending interrupts... (ISTAT is clear-on-read; read twice
         * to be sure).
         */
        tg3_readphy(tp, MII_TG3_ISTAT, &val);
        tg3_readphy(tp, MII_TG3_ISTAT, &val);

        if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
                tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
        else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
                tg3_writephy(tp, MII_TG3_IMASK, ~0);

        /* LED mode selection on the older 5700/5701 parts. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
                        tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
                else
                        tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
        }

        current_link_up = 0;
        current_speed = SPEED_INVALID;
        current_duplex = DUPLEX_INVALID;
        tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
        tp->link_config.rmt_adv = 0;

        if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
                /* Ensure bit 10 of the MISCTEST shadow register is set;
                 * if we had to set it, skip straight to relink.
                 */
                err = tg3_phy_auxctl_read(tp,
                                          MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
                                          &val);
                if (!err && !(val & (1 << 10))) {
                        tg3_phy_auxctl_write(tp,
                                             MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
                                             val | (1 << 10));
                        goto relink;
                }
        }

        /* Poll for link-up; BMSR read twice per pass (latched bits). */
        bmsr = 0;
        for (i = 0; i < 100; i++) {
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                    (bmsr & BMSR_LSTATUS))
                        break;
                udelay(40);
        }

        if (bmsr & BMSR_LSTATUS) {
                u32 aux_stat, bmcr;

                /* Wait for AUX_STAT to go nonzero (speed/duplex valid). */
                tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
                for (i = 0; i < 2000; i++) {
                        udelay(10);
                        if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
                            aux_stat)
                                break;
                }

                tg3_aux_stat_to_speed_duplex(tp, aux_stat,
                                             &current_speed,
                                             &current_duplex);

                /* Wait for a sane BMCR value (0x7fff looks like a bad
                 * read on a floating bus).
                 */
                bmcr = 0;
                for (i = 0; i < 200; i++) {
                        tg3_readphy(tp, MII_BMCR, &bmcr);
                        if (tg3_readphy(tp, MII_BMCR, &bmcr))
                                continue;
                        if (bmcr && bmcr != 0x7fff)
                                break;
                        udelay(10);
                }

                lcl_adv = 0;
                rmt_adv = 0;

                tp->link_config.active_speed = current_speed;
                tp->link_config.active_duplex = current_duplex;

                if (tp->link_config.autoneg == AUTONEG_ENABLE) {
                        /* Link is only "up" if the PHY is advertising what
                         * we configured and the partner abilities read OK.
                         */
                        if ((bmcr & BMCR_ANENABLE) &&
                            tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
                            tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
                                current_link_up = 1;
                } else {
                        /* Forced mode: everything must match the request. */
                        if (!(bmcr & BMCR_ANENABLE) &&
                            tp->link_config.speed == current_speed &&
                            tp->link_config.duplex == current_duplex &&
                            tp->link_config.flowctrl ==
                            tp->link_config.active_flowctrl) {
                                current_link_up = 1;
                        }
                }

                if (current_link_up == 1 &&
                    tp->link_config.active_duplex == DUPLEX_FULL) {
                        u32 reg, bit;

                        /* MDI-X status lives in a different register on
                         * FET-style PHYs.
                         */
                        if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                                reg = MII_TG3_FET_GEN_STAT;
                                bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
                        } else {
                                reg = MII_TG3_EXT_STAT;
                                bit = MII_TG3_EXT_STAT_MDIX;
                        }

                        if (!tg3_readphy(tp, reg, &val) && (val & bit))
                                tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;

                        tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
                }
        }

relink:
        if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
                /* Restart autonegotiation from scratch. */
                tg3_phy_copper_begin(tp);

                tg3_readphy(tp, MII_BMSR, &bmsr);
                if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
                    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
                        current_link_up = 1;
        }

        /* Program the MAC port mode to match the negotiated speed. */
        tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
        if (current_link_up == 1) {
                if (tp->link_config.active_speed == SPEED_100 ||
                    tp->link_config.active_speed == SPEED_10)
                        tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
                else
                        tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
        } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
                tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
        else
                tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

        tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
        if (tp->link_config.active_duplex == DUPLEX_HALF)
                tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
                if (current_link_up == 1 &&
                    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
                        tp->mac_mode |= MAC_MODE_LINK_POLARITY;
                else
                        tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
        }

        /* ??? Without this setting Netgear GA302T PHY does not
         * ??? send/receive packets...
         */
        if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
            tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
                tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        tg3_phy_eee_adjust(tp, current_link_up);

        if (tg3_flag(tp, USE_LINKCHG_REG)) {
                /* Polled via timer. */
                tw32_f(MAC_EVENT, 0);
        } else {
                tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
        }
        udelay(40);

        /* Notify firmware of gigabit link-up on 5700 PCI-X / high-speed
         * PCI configurations.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
            current_link_up == 1 &&
            tp->link_config.active_speed == SPEED_1000 &&
            (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
                udelay(120);
                tw32_f(MAC_STATUS,
                     (MAC_STATUS_SYNC_CHANGED |
                      MAC_STATUS_CFG_CHANGED));
                udelay(40);
                tg3_write_mem(tp,
                              NIC_SRAM_FIRMWARE_MBOX,
                              NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
        }

        /* Prevent send BD corruption. */
        if (tg3_flag(tp, CLKREQ_BUG)) {
                u16 oldlnkctl, newlnkctl;

                /* Disable PCIe CLKREQ at 10/100, enable it otherwise. */
                pci_read_config_word(tp->pdev,
                                     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
                                     &oldlnkctl);
                if (tp->link_config.active_speed == SPEED_100 ||
                    tp->link_config.active_speed == SPEED_10)
                        newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
                else
                        newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
                if (newlnkctl != oldlnkctl)
                        pci_write_config_word(tp->pdev,
                                              pci_pcie_cap(tp->pdev) +
                                              PCI_EXP_LNKCTL, newlnkctl);
        }

        /* Propagate the new link state to the network stack. */
        if (current_link_up != netif_carrier_ok(tp->dev)) {
                if (current_link_up)
                        netif_carrier_on(tp->dev);
                else
                        netif_carrier_off(tp->dev);
                tg3_link_report(tp);
        }

        return 0;
}
4351
/* Per-negotiation state for the software 1000BASE-X autonegotiation
 * state machine (tg3_fiber_aneg_smachine).  The state and MR_* flag
 * names parallel the management variables of IEEE 802.3 Clause 37.
 */
struct tg3_fiber_aneginfo {
        int state;              /* current ANEG_STATE_* of the machine */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14      /* unimplemented */
#define ANEG_STATE_NEXT_PAGE_WAIT       15      /* unimplemented */

        u32 flags;              /* MR_* control/status bits below */
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

        /* Tick counters: cur_time advances once per smachine call;
         * link_time records when the current state was entered.
         */
        unsigned long link_time, cur_time;

        u32 ability_match_cfg;          /* last rx config word compared */
        int ability_match_count;        /* consecutive identical rx configs */

        char ability_match, idle_match, ack_match;      /* 0/1 predicates */

        /* Raw tx/rx autoneg config words (ANEG_CFG_* bits).
         * NOTE(review): the CFG bit positions don't follow Clause 37
         * bit numbering directly — presumably they match the MAC's
         * TX/RX_AUTO_NEG register layout; confirm before changing.
         */
        u32 txconfig, rxconfig;
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes for tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0       /* keep running the state machine */
#define ANEG_DONE       1       /* negotiation finished successfully */
#define ANEG_TIMER_ENAB 2       /* caller should keep ticking the machine */
#define ANEG_FAILED     (-1)    /* parenthesized so the negative constant is
                                 * safe in any expression context
                                 */

#define ANEG_STATE_SETTLE_TIME  10000   /* ticks a state must hold before advancing */
4415
/* One tick of the software 1000BASE-X autonegotiation arbitration state
 * machine; the states parallel IEEE 802.3 Clause 37.  Samples the received
 * config word from the MAC, updates the match predicates, then advances
 * ap->state.  Returns ANEG_OK, ANEG_TIMER_ENAB (caller must keep ticking),
 * ANEG_DONE, or ANEG_FAILED.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
                                   struct tg3_fiber_aneginfo *ap)
{
        u16 flowctrl;
        unsigned long delta;
        u32 rx_cfg_reg;
        int ret;

        /* First-ever tick: zero all the match bookkeeping. */
        if (ap->state == ANEG_STATE_UNKNOWN) {
                ap->rxconfig = 0;
                ap->link_time = 0;
                ap->cur_time = 0;
                ap->ability_match_cfg = 0;
                ap->ability_match_count = 0;
                ap->ability_match = 0;
                ap->idle_match = 0;
                ap->ack_match = 0;
        }
        ap->cur_time++;

        /* Sample the partner's config word and update the ability /
         * ack / idle match predicates.  ability_match requires the
         * same nonzero config word on consecutive samples.
         */
        if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
                rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

                if (rx_cfg_reg != ap->ability_match_cfg) {
                        ap->ability_match_cfg = rx_cfg_reg;
                        ap->ability_match = 0;
                        ap->ability_match_count = 0;
                } else {
                        if (++ap->ability_match_count > 1) {
                                ap->ability_match = 1;
                                ap->ability_match_cfg = rx_cfg_reg;
                        }
                }
                if (rx_cfg_reg & ANEG_CFG_ACK)
                        ap->ack_match = 1;
                else
                        ap->ack_match = 0;

                ap->idle_match = 0;
        } else {
                /* No config being received: partner is sending idles. */
                ap->idle_match = 1;
                ap->ability_match_cfg = 0;
                ap->ability_match_count = 0;
                ap->ability_match = 0;
                ap->ack_match = 0;

                rx_cfg_reg = 0;
        }

        ap->rxconfig = rx_cfg_reg;
        ret = ANEG_OK;

        switch (ap->state) {
        case ANEG_STATE_UNKNOWN:
                if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
                        ap->state = ANEG_STATE_AN_ENABLE;

                /* fallthru */
        case ANEG_STATE_AN_ENABLE:
                ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
                if (ap->flags & MR_AN_ENABLE) {
                        ap->link_time = 0;
                        ap->cur_time = 0;
                        ap->ability_match_cfg = 0;
                        ap->ability_match_count = 0;
                        ap->ability_match = 0;
                        ap->idle_match = 0;
                        ap->ack_match = 0;

                        ap->state = ANEG_STATE_RESTART_INIT;
                } else {
                        ap->state = ANEG_STATE_DISABLE_LINK_OK;
                }
                break;

        case ANEG_STATE_RESTART_INIT:
                /* Start transmitting an all-zero config word. */
                ap->link_time = ap->cur_time;
                ap->flags &= ~(MR_NP_LOADED);
                ap->txconfig = 0;
                tw32(MAC_TX_AUTO_NEG, 0);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ret = ANEG_TIMER_ENAB;
                ap->state = ANEG_STATE_RESTART;

                /* fallthru */
        case ANEG_STATE_RESTART:
                /* Hold in restart until the settle time elapses. */
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME)
                        ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
                else
                        ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_DISABLE_LINK_OK:
                ret = ANEG_DONE;
                break;

        case ANEG_STATE_ABILITY_DETECT_INIT:
                /* Advertise full duplex plus our pause capabilities. */
                ap->flags &= ~(MR_TOGGLE_TX);
                ap->txconfig = ANEG_CFG_FD;
                flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
                if (flowctrl & ADVERTISE_1000XPAUSE)
                        ap->txconfig |= ANEG_CFG_PS1;
                if (flowctrl & ADVERTISE_1000XPSE_ASYM)
                        ap->txconfig |= ANEG_CFG_PS2;
                tw32(MAC_TX_AUTO_NEG, ap->txconfig);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_ABILITY_DETECT;
                break;

        case ANEG_STATE_ABILITY_DETECT:
                /* Wait until the partner's (nonzero) config word is stable. */
                if (ap->ability_match != 0 && ap->rxconfig != 0)
                        ap->state = ANEG_STATE_ACK_DETECT_INIT;
                break;

        case ANEG_STATE_ACK_DETECT_INIT:
                /* Acknowledge the partner's config word. */
                ap->txconfig |= ANEG_CFG_ACK;
                tw32(MAC_TX_AUTO_NEG, ap->txconfig);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_ACK_DETECT;

                /* fallthru */
        case ANEG_STATE_ACK_DETECT:
                if (ap->ack_match != 0) {
                        /* Partner acked: proceed if the word (minus ACK)
                         * still matches; otherwise renegotiate.
                         */
                        if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
                            (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
                                ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
                        } else {
                                ap->state = ANEG_STATE_AN_ENABLE;
                        }
                } else if (ap->ability_match != 0 &&
                           ap->rxconfig == 0) {
                        /* Partner dropped to idles: restart. */
                        ap->state = ANEG_STATE_AN_ENABLE;
                }
                break;

        case ANEG_STATE_COMPLETE_ACK_INIT:
                /* Reserved bits set in the partner's word are fatal. */
                if (ap->rxconfig & ANEG_CFG_INVAL) {
                        ret = ANEG_FAILED;
                        break;
                }
                /* Decode the partner's advertised abilities into MR_* flags. */
                ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
                               MR_LP_ADV_HALF_DUPLEX |
                               MR_LP_ADV_SYM_PAUSE |
                               MR_LP_ADV_ASYM_PAUSE |
                               MR_LP_ADV_REMOTE_FAULT1 |
                               MR_LP_ADV_REMOTE_FAULT2 |
                               MR_LP_ADV_NEXT_PAGE |
                               MR_TOGGLE_RX |
                               MR_NP_RX);
                if (ap->rxconfig & ANEG_CFG_FD)
                        ap->flags |= MR_LP_ADV_FULL_DUPLEX;
                if (ap->rxconfig & ANEG_CFG_HD)
                        ap->flags |= MR_LP_ADV_HALF_DUPLEX;
                if (ap->rxconfig & ANEG_CFG_PS1)
                        ap->flags |= MR_LP_ADV_SYM_PAUSE;
                if (ap->rxconfig & ANEG_CFG_PS2)
                        ap->flags |= MR_LP_ADV_ASYM_PAUSE;
                if (ap->rxconfig & ANEG_CFG_RF1)
                        ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
                if (ap->rxconfig & ANEG_CFG_RF2)
                        ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
                if (ap->rxconfig & ANEG_CFG_NP)
                        ap->flags |= MR_LP_ADV_NEXT_PAGE;

                ap->link_time = ap->cur_time;

                ap->flags ^= (MR_TOGGLE_TX);
                /* 0x0008 has no ANEG_CFG_* name — presumably the rx
                 * toggle bit; confirm against the MAC register layout
                 * before touching.
                 */
                if (ap->rxconfig & 0x0008)
                        ap->flags |= MR_TOGGLE_RX;
                if (ap->rxconfig & ANEG_CFG_NP)
                        ap->flags |= MR_NP_RX;
                ap->flags |= MR_PAGE_RX;

                ap->state = ANEG_STATE_COMPLETE_ACK;
                ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_COMPLETE_ACK:
                if (ap->ability_match != 0 &&
                    ap->rxconfig == 0) {
                        ap->state = ANEG_STATE_AN_ENABLE;
                        break;
                }
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME) {
                        if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
                                ap->state = ANEG_STATE_IDLE_DETECT_INIT;
                        } else {
                                /* Next-page exchange is not implemented;
                                 * only the no-next-page case can succeed.
                                 */
                                if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
                                    !(ap->flags & MR_NP_RX)) {
                                        ap->state = ANEG_STATE_IDLE_DETECT_INIT;
                                } else {
                                        ret = ANEG_FAILED;
                                }
                        }
                }
                break;

        case ANEG_STATE_IDLE_DETECT_INIT:
                /* Stop sending configs; transmit idles instead. */
                ap->link_time = ap->cur_time;
                tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_IDLE_DETECT;
                ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_IDLE_DETECT:
                if (ap->ability_match != 0 &&
                    ap->rxconfig == 0) {
                        ap->state = ANEG_STATE_AN_ENABLE;
                        break;
                }
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME) {
                        /* XXX another gem from the Broadcom driver :( */
                        ap->state = ANEG_STATE_LINK_OK;
                }
                break;

        case ANEG_STATE_LINK_OK:
                ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
                ret = ANEG_DONE;
                break;

        case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
                /* ??? unimplemented */
                break;

        case ANEG_STATE_NEXT_PAGE_WAIT:
                /* ??? unimplemented */
                break;

        default:
                ret = ANEG_FAILED;
                break;
        }

        return ret;
}
4667
4668 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4669 {
4670         int res = 0;
4671         struct tg3_fiber_aneginfo aninfo;
4672         int status = ANEG_FAILED;
4673         unsigned int tick;
4674         u32 tmp;
4675
4676         tw32_f(MAC_TX_AUTO_NEG, 0);
4677
4678         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4679         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4680         udelay(40);
4681
4682         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4683         udelay(40);
4684
4685         memset(&aninfo, 0, sizeof(aninfo));
4686         aninfo.flags |= MR_AN_ENABLE;
4687         aninfo.state = ANEG_STATE_UNKNOWN;
4688         aninfo.cur_time = 0;
4689         tick = 0;
4690         while (++tick < 195000) {
4691                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4692                 if (status == ANEG_DONE || status == ANEG_FAILED)
4693                         break;
4694
4695                 udelay(1);
4696         }
4697
4698         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4699         tw32_f(MAC_MODE, tp->mac_mode);
4700         udelay(40);
4701
4702         *txflags = aninfo.txconfig;
4703         *rxflags = aninfo.flags;
4704
4705         if (status == ANEG_DONE &&
4706             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4707                              MR_LP_ADV_FULL_DUPLEX)))
4708                 res = 1;
4709
4710         return res;
4711 }
4712
/* Hardware init sequence for the BCM8002 SerDes PHY.  The raw register
 * numbers and values below are opaque vendor magic (presumably from
 * Broadcom's reference init sequence); the order and delays are
 * significant — do not reorder.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
        u32 mac_status = tr32(MAC_STATUS);
        int i;

        /* Reset when initting first time or we have a link. */
        if (tg3_flag(tp, INIT_COMPLETE) &&
            !(mac_status & MAC_STATUS_PCS_SYNCED))
                return;

        /* Set PLL lock range. */
        tg3_writephy(tp, 0x16, 0x8007);

        /* SW reset */
        tg3_writephy(tp, MII_BMCR, BMCR_RESET);

        /* Wait for reset to complete. */
        /* XXX schedule_timeout() ... */
        for (i = 0; i < 500; i++)
                udelay(10);

        /* Config mode; select PMA/Ch 1 regs. */
        tg3_writephy(tp, 0x10, 0x8411);

        /* Enable auto-lock and comdet, select txclk for tx. */
        tg3_writephy(tp, 0x11, 0x0a10);

        tg3_writephy(tp, 0x18, 0x00a0);
        tg3_writephy(tp, 0x16, 0x41ff);

        /* Assert and deassert POR. */
        tg3_writephy(tp, 0x13, 0x0400);
        udelay(40);
        tg3_writephy(tp, 0x13, 0x0000);

        /* Toggle auto-lock off and back on around POR release. */
        tg3_writephy(tp, 0x11, 0x0a50);
        udelay(40);
        tg3_writephy(tp, 0x11, 0x0a10);

        /* Wait for signal to stabilize */
        /* XXX schedule_timeout() ... */
        for (i = 0; i < 15000; i++)
                udelay(10);

        /* Deselect the channel register so we can read the PHYID
         * later.
         */
        tg3_writephy(tp, 0x10, 0x8011);
}
4762
4763 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4764 {
4765         u16 flowctrl;
4766         u32 sg_dig_ctrl, sg_dig_status;
4767         u32 serdes_cfg, expected_sg_dig_ctrl;
4768         int workaround, port_a;
4769         int current_link_up;
4770
4771         serdes_cfg = 0;
4772         expected_sg_dig_ctrl = 0;
4773         workaround = 0;
4774         port_a = 1;
4775         current_link_up = 0;
4776
4777         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4778             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4779                 workaround = 1;
4780                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4781                         port_a = 0;
4782
4783                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4784                 /* preserve bits 20-23 for voltage regulator */
4785                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4786         }
4787
4788         sg_dig_ctrl = tr32(SG_DIG_CTRL);
4789
4790         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4791                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4792                         if (workaround) {
4793                                 u32 val = serdes_cfg;
4794
4795                                 if (port_a)
4796                                         val |= 0xc010000;
4797                                 else
4798                                         val |= 0x4010000;
4799                                 tw32_f(MAC_SERDES_CFG, val);
4800                         }
4801
4802                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4803                 }
4804                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4805                         tg3_setup_flow_control(tp, 0, 0);
4806                         current_link_up = 1;
4807                 }
4808                 goto out;
4809         }
4810
4811         /* Want auto-negotiation.  */
4812         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4813
4814         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4815         if (flowctrl & ADVERTISE_1000XPAUSE)
4816                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4817         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4818                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4819
4820         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4821                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4822                     tp->serdes_counter &&
4823                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
4824                                     MAC_STATUS_RCVD_CFG)) ==
4825                      MAC_STATUS_PCS_SYNCED)) {
4826                         tp->serdes_counter--;
4827                         current_link_up = 1;
4828                         goto out;
4829                 }
4830 restart_autoneg:
4831                 if (workaround)
4832                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4833                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4834                 udelay(5);
4835                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4836
4837                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4838                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4839         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4840                                  MAC_STATUS_SIGNAL_DET)) {
4841                 sg_dig_status = tr32(SG_DIG_STATUS);
4842                 mac_status = tr32(MAC_STATUS);
4843
4844                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4845                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
4846                         u32 local_adv = 0, remote_adv = 0;
4847
4848                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4849                                 local_adv |= ADVERTISE_1000XPAUSE;
4850                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4851                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4852
4853                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4854                                 remote_adv |= LPA_1000XPAUSE;
4855                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4856                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4857
4858                         tp->link_config.rmt_adv =
4859                                            mii_adv_to_ethtool_adv_x(remote_adv);
4860
4861                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4862                         current_link_up = 1;
4863                         tp->serdes_counter = 0;
4864                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4865                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4866                         if (tp->serdes_counter)
4867                                 tp->serdes_counter--;
4868                         else {
4869                                 if (workaround) {
4870                                         u32 val = serdes_cfg;
4871
4872                                         if (port_a)
4873                                                 val |= 0xc010000;
4874                                         else
4875                                                 val |= 0x4010000;
4876
4877                                         tw32_f(MAC_SERDES_CFG, val);
4878                                 }
4879
4880                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4881                                 udelay(40);
4882
4883                                 /* Link parallel detection - link is up */
4884                                 /* only if we have PCS_SYNC and not */
4885                                 /* receiving config code words */
4886                                 mac_status = tr32(MAC_STATUS);
4887                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4888                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4889                                         tg3_setup_flow_control(tp, 0, 0);
4890                                         current_link_up = 1;
4891                                         tp->phy_flags |=
4892                                                 TG3_PHYFLG_PARALLEL_DETECT;
4893                                         tp->serdes_counter =
4894                                                 SERDES_PARALLEL_DET_TIMEOUT;
4895                                 } else
4896                                         goto restart_autoneg;
4897                         }
4898                 }
4899         } else {
4900                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4901                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4902         }
4903
4904 out:
4905         return current_link_up;
4906 }
4907
/* Bring up a TBI (fiber) link without the hardware autoneg engine:
 * either run the software 802.3z autoneg state machine or force
 * 1000-full.  @mac_status is a snapshot of MAC_STATUS taken by the
 * caller.  Returns 1 if the link is up, 0 otherwise.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* Without PCS sync there is no usable signal; link stays down. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			/* Translate the autoneg config-word pause bits
			 * into MII-style advertisement bits so the
			 * common flow control resolution code applies.
			 */
			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		/* Ack any latched SYNC/CFG change events, polling up to
		 * 30 times until the bits stay clear.
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		mac_status = tr32(MAC_STATUS);
		/* Autoneg produced no result, but we have sync and the
		 * partner is not sending config words: treat the link
		 * as up via parallel detection.
		 */
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		/* Forced mode: no advertisement, no pause resolution. */
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		/* Briefly assert SEND_CONFIGS, then restore mac_mode. */
		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
4972
/* Main link setup path for TBI (fiber) devices.  Dispatches to the
 * hardware-autoneg or by-hand helper, then programs the MAC, LEDs and
 * link_config to match the result and reports link changes.  Always
 * returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Remember the previous settings so we can tell whether a link
	 * report is needed even when carrier state does not change.
	 */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: link already up and stable (sync + signal, no
	 * pending config words) - just ack the change bits and return.
	 */
	if (!tg3_flag(tp, HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Switch the MAC port mode to TBI before touching the link. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	tp->link_config.rmt_adv = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear the stale link-change bit in the status block so the
	 * next interrupt reflects only new events.
	 */
	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack latched change bits, polling until they stay clear. */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		/* No sync and the autoneg timeout has expired: pulse
		 * SEND_CONFIGS (presumably to nudge the partner into
		 * renegotiating - matches the by-hand forced path).
		 */
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		/* TBI links are recorded as 1000 Mb/s full duplex. */
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Propagate the result to the net stack; also report when only
	 * pause/speed/duplex changed with carrier state unchanged.
	 */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
5081
/* Link setup for SerDes devices driven through MII-style 1000Base-X
 * PHY registers (e.g. 5714-class parts).  Handles autoneg, forced
 * mode, and link resolution including the parallel-detect case.
 * Returns the OR of the tg3_readphy() error results.
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack any latched status-change events before probing. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;
	tp->link_config.rmt_adv = 0;

	/* BMSR link status is latched-low; read twice to get the
	 * current value.
	 */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	/* On 5714, override the PHY link bit with the MAC TX status. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, newadv;

		/* Rebuild the 1000Base-X advertisement from the
		 * requested flow control and advertising settings.
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				 ADVERTISE_1000XPAUSE |
				 ADVERTISE_1000XPSE_ASYM |
				 ADVERTISE_SLCT);

		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);

		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
			/* Advertisement changed (or autoneg was off):
			 * restart autoneg and return early; the serdes
			 * counter paces the remaining work.
			 */
			tg3_writephy(tp, MII_ADVERTISE, newadv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: construct the desired BMCR value. */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Withdraw the speed advertisement and
				 * restart autoneg before forcing the
				 * new mode.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Latched-low link bit: read BMSR twice. */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			/* Resolve duplex from the intersection of the
			 * local and partner advertisements.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = 0;
			}
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	/* NOTE(review): this tests the *previous* active_duplex - the
	 * new value is only stored further down.  Looks historical;
	 * confirm against upstream before changing.
	 */
	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	/* Report carrier transitions; dropping the link also clears
	 * the parallel-detect flag.
	 */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
5253
/* Poll-time helper for MII-SerDes links.  When autoneg gets no
 * response, check the PHY shadow/expansion registers for signal-detect
 * without config code words and force the link up (parallel
 * detection); when config words reappear, hand control back to
 * autoneg.  Does nothing while tp->serdes_counter is counting down.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
					 MII_TG3_DSP_EXP1_INT_STAT);
			/* Double read - presumably a latched register,
			 * like the BMSR idiom above; confirm against
			 * the PHY datasheet.
			 */
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (netif_carrier_ok(tp->dev) &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				 MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

		}
	}
}
5313
/* Top-level link (re)configuration entry point.  Picks the PHY-type
 * specific setup routine, then applies link-state dependent chip
 * fixups (clock prescaler, TX lengths, stats coalescing, ASPM
 * threshold).  Returns the setup routine's error code.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	/* On 5784 A-steps, rescale the GRC timer prescaler to match
	 * the current MAC clock reported by the CPMU.
	 */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		u32 scale;

		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	/* 5720 preserves its jumbo-frame-length and count-down fields. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	/* Half-duplex gigabit uses the long (0xff) slot time. */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	/* Pre-5705 parts: only coalesce the statistics block while the
	 * link is up; zero ticks disables it.
	 */
	if (!tg3_flag(tp, 5705_PLUS)) {
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	/* ASPM workaround: use tp->pwrmgmt_thresh for the L1 threshold
	 * while the link is down, saturate the field while it is up.
	 */
	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
5378
5379 static inline int tg3_irq_sync(struct tg3 *tp)
5380 {
5381         return tp->irq_sync;
5382 }
5383
5384 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5385 {
5386         int i;
5387
5388         dst = (u32 *)((u8 *)dst + off);
5389         for (i = 0; i < len; i += sizeof(u32))
5390                 *dst++ = tr32(off + i);
5391 }
5392
/* Fill @regs with the legacy (non-PCIe) register windows for
 * tg3_dump_state().  Each tg3_rd32_loop() call copies one
 * (offset, length) register window; the data lands at its native
 * offset inside the @regs buffer.
 */
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	/* Per-vector coalescing registers: dumped only with MSI-X. */
	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	/* TX CPU registers are skipped on 5705-plus parts. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	/* NVRAM registers: dumped only when the NVRAM flag is set. */
	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
5442
/* Emit a register and status-block dump to the kernel log for
 * debugging.  Reads registers into a temporary buffer, prints the
 * non-zero lines, then prints the software view of each NAPI vector.
 */
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	/* GFP_ATOMIC: presumably reachable from a non-sleeping
	 * context (error/reset paths) - confirm against callers.
	 */
	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs) {
		netdev_err(tp->dev, "Failed allocating register dump buffer\n");
		return;
	}

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	/* Print four registers per line, skipping all-zero lines to
	 * keep the log compact.
	 */
	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	/* Dump the host status block and NAPI state per vector. */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
5500
5501 /* This is called whenever we suspect that the system chipset is re-
5502  * ordering the sequence of MMIO to the tx send mailbox. The symptom
5503  * is bogus tx completions. We try to recover by setting the
5504  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5505  * in the workqueue.
5506  */
5507 static void tg3_tx_recover(struct tg3 *tp)
5508 {
5509         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5510                tp->write32_tx_mbox == tg3_write_indirect_mbox);
5511
5512         netdev_warn(tp->dev,
5513                     "The system may be re-ordering memory-mapped I/O "
5514                     "cycles to the network device, attempting to recover. "
5515                     "Please report the problem to the driver maintainer "
5516                     "and include system chipset information.\n");
5517
5518         spin_lock(&tp->lock);
5519         tg3_flag_set(tp, TX_RECOVERY_PENDING);
5520         spin_unlock(&tp->lock);
5521 }
5522
5523 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5524 {
5525         /* Tell compiler to fetch tx indices from memory. */
5526         barrier();
5527         return tnapi->tx_pending -
5528                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5529 }
5530
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	/* Hardware's tx consumer index, read from the status block. */
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	/* With TSS, tx queue N is serviced by napi[N+1], so shift down
	 * to get the netdev tx queue index.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	/* Reap every descriptor the hardware has consumed. */
	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completion for a slot with no skb means the hardware
		 * and driver views of the ring disagree -- suspect MMIO
		 * reordering and schedule recovery.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		/* Skip extra BDs used when the head was split across
		 * multiple descriptors (see the xmit path's workarounds).
		 */
		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			/* Frag slots must not own an skb, and we must never
			 * walk past the hardware consumer index.
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
				       PCI_DMA_TODEVICE);

			/* As above: skip split-BD placeholder entries. */
			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		pkts_compl++;
		bytes_compl += skb->len;

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	/* Report completed work to the BQL layer. */
	netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		/* Re-check under the tx queue lock to close the race with a
		 * concurrent queue stop in the xmit path.
		 */
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
5624
5625 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5626 {
5627         if (!ri->data)
5628                 return;
5629
5630         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5631                          map_sz, PCI_DMA_FROMDEVICE);
5632         kfree(ri->data);
5633         ri->data = NULL;
5634 }
5635
5636 /* Returns size of skb allocated or < 0 on error.
5637  *
5638  * We only need to fill in the address because the other members
5639  * of the RX descriptor are invariant, see tg3_init_rings.
5640  *
5641  * Note the purposeful assymetry of cpu vs. chip accesses.  For
5642  * posting buffers we only dirty the first cache line of the RX
5643  * descriptor (containing the address).  Whereas for the RX status
5644  * buffers the cpu only reads the last cacheline of the RX descriptor
5645  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5646  */
5647 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5648                             u32 opaque_key, u32 dest_idx_unmasked)
5649 {
5650         struct tg3_rx_buffer_desc *desc;
5651         struct ring_info *map;
5652         u8 *data;
5653         dma_addr_t mapping;
5654         int skb_size, data_size, dest_idx;
5655
5656         switch (opaque_key) {
5657         case RXD_OPAQUE_RING_STD:
5658                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5659                 desc = &tpr->rx_std[dest_idx];
5660                 map = &tpr->rx_std_buffers[dest_idx];
5661                 data_size = tp->rx_pkt_map_sz;
5662                 break;
5663
5664         case RXD_OPAQUE_RING_JUMBO:
5665                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5666                 desc = &tpr->rx_jmb[dest_idx].std;
5667                 map = &tpr->rx_jmb_buffers[dest_idx];
5668                 data_size = TG3_RX_JMB_MAP_SZ;
5669                 break;
5670
5671         default:
5672                 return -EINVAL;
5673         }
5674
5675         /* Do not overwrite any of the map or rp information
5676          * until we are sure we can commit to a new buffer.
5677          *
5678          * Callers depend upon this behavior and assume that
5679          * we leave everything unchanged if we fail.
5680          */
5681         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
5682                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5683         data = kmalloc(skb_size, GFP_ATOMIC);
5684         if (!data)
5685                 return -ENOMEM;
5686
5687         mapping = pci_map_single(tp->pdev,
5688                                  data + TG3_RX_OFFSET(tp),
5689                                  data_size,
5690                                  PCI_DMA_FROMDEVICE);
5691         if (pci_dma_mapping_error(tp->pdev, mapping)) {
5692                 kfree(data);
5693                 return -EIO;
5694         }
5695
5696         map->data = data;
5697         dma_unmap_addr_set(map, mapping, mapping);
5698
5699         desc->addr_hi = ((u64)mapping >> 32);
5700         desc->addr_lo = ((u64)mapping & 0xffffffff);
5701
5702         return data_size;
5703 }
5704
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_data for full details.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	/* The source is always napi[0]'s producer ring set. */
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	/* Locate source and destination descriptor/map pairs for the
	 * ring identified by the opaque cookie.
	 */
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		/* Unknown ring cookie: nothing to recycle. */
		return;
	}

	/* Move buffer ownership and DMA address to the destination slot. */
	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->data = NULL;
}
5754
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	/* Walk the status ring until we catch up with the hardware or
	 * exhaust the NAPI budget.
	 */
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;
		u8 *data;

		/* The opaque cookie identifies which producer ring and
		 * slot this completion refers to.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			data = ri->data;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		/* Drop errored frames, except the ODD_NIBBLE MII report
		 * which is tolerated.
		 */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			/* Give the buffer back to the producer ring. */
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		prefetch(data + TG3_RX_OFFSET(tp));
		/* Hardware length includes the FCS; strip it. */
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if (len > TG3_RX_COPY_THRESH(tp)) {
			int skb_size;

			/* Large frame: post a replacement buffer first,
			 * then hand the current one up zero-copy.
			 */
			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
						    *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			/* tg3_alloc_rx_data() returns the mapped data size,
			 * so this unmap length matches the original mapping.
			 */
			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			skb = build_skb(data);
			if (!skb) {
				kfree(data);
				goto drop_it_no_recycle;
			}
			skb_reserve(skb, TG3_RX_OFFSET(tp));
			/* Ensure that the update to the data happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->data = NULL;

		} else {
			/* Small frame: copy into a fresh skb and recycle
			 * the original buffer back to the chip.
			 */
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			skb = netdev_alloc_skb(tp->dev,
					       len + TG3_RAW_IP_ALIGN);
			if (skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			memcpy(skb->data,
			       data + TG3_RX_OFFSET(tp),
			       len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
		}

		skb_put(skb, len);
		/* Trust the hardware checksum only when it verified a
		 * full 0xffff TCP/UDP pseudo-header result.
		 */
		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		/* Reject oversized frames unless they are VLAN tagged
		 * (the tag accounts for the extra length).
		 */
		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb,
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically publish the std producer index so the chip
		 * never starves for buffers on long bursts.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		/* napi[1] owns the hardware refill under RSS; make sure it
		 * runs to transfer these buffers to the real ring.
		 */
		if (tnapi != &tp->napi[1])
			napi_schedule(&tp->napi[1].napi);
	}

	return received;
}
5965
5966 static void tg3_poll_link(struct tg3 *tp)
5967 {
5968         /* handle link change and other phy events */
5969         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5970                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5971
5972                 if (sblk->status & SD_STATUS_LINK_CHG) {
5973                         sblk->status = SD_STATUS_UPDATED |
5974                                        (sblk->status & ~SD_STATUS_LINK_CHG);
5975                         spin_lock(&tp->lock);
5976                         if (tg3_flag(tp, USE_PHYLIB)) {
5977                                 tw32_f(MAC_STATUS,
5978                                      (MAC_STATUS_SYNC_CHANGED |
5979                                       MAC_STATUS_CFG_CHANGED |
5980                                       MAC_STATUS_MI_COMPLETION |
5981                                       MAC_STATUS_LNKSTATE_CHANGED));
5982                                 udelay(40);
5983                         } else
5984                                 tg3_setup_phy(tp, 0);
5985                         spin_unlock(&tp->lock);
5986                 }
5987         }
5988 }
5989
/* Transfer rx buffers that per-vector rings (spr) have produced into
 * the single hardware-visible producer ring set (dpr).  Used under RSS
 * where only napi[0]'s rings are programmed into the chip.  Returns 0,
 * or -ENOSPC when the destination ring ran out of free slots (the
 * remaining buffers stay on the source ring for a later retry).
 */
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	/* First pass: the standard ring. */
	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		/* Copy at most up to the ring wrap point per iteration. */
		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		/* Shrink the copy to stop at the first occupied
		 * destination slot.
		 */
		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		/* Move buffer ownership, then mirror the DMA addresses
		 * into the destination descriptors.
		 */
		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	/* Second pass: the jumbo ring, same algorithm. */
	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
6115
/* Core per-vector poll body shared by tg3_poll() and tg3_poll_msix():
 * reap tx completions, process rx within the NAPI budget, and (for the
 * RSS refill vector) push buffers back to the hardware rings.  Returns
 * the updated work_done count.
 */
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		/* tg3_tx() flagged a recovery; skip rx work, the chip
		 * will be reset by the workqueue.
		 */
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	/* Under RSS, napi[1] is the designated vector that transfers
	 * buffers from all per-vector rings into napi[0]'s
	 * hardware-visible producer rings.
	 */
	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		for (i = 1; i < tp->irq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		/* Order the ring memory updates before the mailbox
		 * writes below.
		 */
		wmb();

		/* Only write the mailboxes whose indices actually moved. */
		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		/* A transfer came up short (destination full); poke
		 * HOSTCC_MODE with tp->coal_now so we get called again
		 * soon to retry (NOTE(review): coal_now semantics are
		 * set up elsewhere in this file).
		 */
		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
6162
6163 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6164 {
6165         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6166                 schedule_work(&tp->reset_task);
6167 }
6168
/* Synchronously cancel any queued/running reset task, then clear the
 * pending flag so a future tg3_reset_task_schedule() can queue again.
 * cancel_work_sync() may sleep, so this must run in process context.
 */
static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
6174
/* NAPI poll handler for the MSI-X vectors (napi[1..]).  These vectors
 * always use tagged status, so there is no untagged fallback here as
 * in tg3_poll().
 */
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
6218
6219 static void tg3_process_error(struct tg3 *tp)
6220 {
6221         u32 val;
6222         bool real_error = false;
6223
6224         if (tg3_flag(tp, ERROR_PROCESSED))
6225                 return;
6226
6227         /* Check Flow Attention register */
6228         val = tr32(HOSTCC_FLOW_ATTN);
6229         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6230                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
6231                 real_error = true;
6232         }
6233
6234         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6235                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
6236                 real_error = true;
6237         }
6238
6239         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6240                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
6241                 real_error = true;
6242         }
6243
6244         if (!real_error)
6245                 return;
6246
6247         tg3_dump_state(tp);
6248
6249         tg3_flag_set(tp, ERROR_PROCESSED);
6250         tg3_reset_task_schedule(tp);
6251 }
6252
/* NAPI poll handler for napi[0] (INTx/MSI and the first vector).
 * Unlike tg3_poll_msix(), this also services error events and link
 * changes, and supports both tagged and untagged status blocks.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			/* Untagged mode: acknowledge by clearing the
			 * updated bit before re-checking for work.
			 */
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
	return work_done;
}
6300
6301 static void tg3_napi_disable(struct tg3 *tp)
6302 {
6303         int i;
6304
6305         for (i = tp->irq_cnt - 1; i >= 0; i--)
6306                 napi_disable(&tp->napi[i].napi);
6307 }
6308
6309 static void tg3_napi_enable(struct tg3 *tp)
6310 {
6311         int i;
6312
6313         for (i = 0; i < tp->irq_cnt; i++)
6314                 napi_enable(&tp->napi[i].napi);
6315 }
6316
6317 static void tg3_napi_init(struct tg3 *tp)
6318 {
6319         int i;
6320
6321         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6322         for (i = 1; i < tp->irq_cnt; i++)
6323                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6324 }
6325
6326 static void tg3_napi_fini(struct tg3 *tp)
6327 {
6328         int i;
6329
6330         for (i = 0; i < tp->irq_cnt; i++)
6331                 netif_napi_del(&tp->napi[i].napi);
6332 }
6333
/* Quiesce the data path: refresh trans_start so the netdev watchdog
 * does not fire while the device is deliberately stopped, then halt
 * NAPI polling and all TX queues.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies; /* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_tx_disable(tp->dev);
}
6340
/* Resume the data path after tg3_netif_stop() / chip (re)init. */
static inline void tg3_netif_start(struct tg3 *tp)
{
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	tg3_napi_enable(tp);
	/* Mark the status block updated so the first poll runs and
	 * services anything that arrived while interrupts were off.
	 */
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
6353
6354 static void tg3_irq_quiesce(struct tg3 *tp)
6355 {
6356         int i;
6357
6358         BUG_ON(tp->irq_sync);
6359
6360         tp->irq_sync = 1;
6361         smp_mb();
6362
6363         for (i = 0; i < tp->irq_cnt; i++)
6364                 synchronize_irq(tp->napi[i].irq_vec);
6365 }
6366
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		/* Wait for any running IRQ handlers to finish. */
		tg3_irq_quiesce(tp);
}
6378
/* Release the lock taken by tg3_full_lock(). */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
6383
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	/* Warm the caches for the status block and next RX completion
	 * entry before the poll routine touches them.
	 */
	prefetch(tnapi->hw_status);
	if (tnapi->rx_rcb)
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	/* Do not schedule NAPI while an IRQ quiesce is in progress. */
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_HANDLED;
}
6401
6402 /* MSI ISR - No need to check for interrupt sharing and no need to
6403  * flush status block and interrupt mailbox. PCI ordering rules
6404  * guarantee that MSI will arrive after the status block.
6405  */
6406 static irqreturn_t tg3_msi(int irq, void *dev_id)
6407 {
6408         struct tg3_napi *tnapi = dev_id;
6409         struct tg3 *tp = tnapi->tp;
6410
6411         prefetch(tnapi->hw_status);
6412         if (tnapi->rx_rcb)
6413                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6414         /*
6415          * Writing any value to intr-mbox-0 clears PCI INTA# and
6416          * chip-internal interrupt pending events.
6417          * Writing non-zero to intr-mbox-0 additional tells the
6418          * NIC to stop sending us irqs, engaging "in-intr-handler"
6419          * event coalescing.
6420          */
6421         tw32_mailbox(tnapi->int_mbox, 0x00000001);
6422         if (likely(!tg3_irq_sync(tp)))
6423                 napi_schedule(&tnapi->napi);
6424
6425         return IRQ_RETVAL(1);
6426 }
6427
/* Legacy INTx interrupt handler (possibly shared line). */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
6476
/* Legacy INTx handler for chips using tagged status blocks: the
 * status tag, rather than SD_STATUS_UPDATED, tells us whether new
 * work has been posted since the last acknowledged tag.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

out:
	return IRQ_RETVAL(handled);
}
6528
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	/* Claim the interrupt if the status block was updated or INTA
	 * is still asserted, then disable further interrupts so the
	 * test fires at most once.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
6543
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll/netconsole substitute for interrupts: invoke the INTx
 * handler on every vector.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
#endif
6554
/* net_device TX watchdog callback: optionally log chip state, then
 * schedule a full reset from process context.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");
		tg3_dump_state(tp);
	}

	tg3_reset_task_schedule(tp);
}
6566
/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
	u32 base = (u32) mapping & 0xffffffff;

	/* The second clause detects 32-bit wrap of base + len + 8.
	 * The first is a cheap pre-filter: only a buffer starting in
	 * the last ~9KB below a 4GB boundary can wrap at all (the
	 * +8 presumably covers hardware read-ahead -- TODO confirm).
	 */
	return (base > 0xffffdcc0) && (base + len + 8 < base);
}
6574
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
					  int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	/* Only 64-bit highmem configs can produce DMA addresses past
	 * 40 bits, and only parts flagged with the 40-bit DMA bug care.
	 */
	if (tg3_flag(tp, 40BIT_DMA_BUG))
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
	return 0;
#else
	return 0;
#endif
}
6587
6588 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6589                                  dma_addr_t mapping, u32 len, u32 flags,
6590                                  u32 mss, u32 vlan)
6591 {
6592         txbd->addr_hi = ((u64) mapping >> 32);
6593         txbd->addr_lo = ((u64) mapping & 0xffffffff);
6594         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6595         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6596 }
6597
/* Queue one DMA-mapped TX segment at ring index *entry, splitting it
 * into multiple descriptors when it exceeds tp->dma_limit.  Advances
 * *entry and decrements *budget once per descriptor consumed.
 * Returns true when the segment trips a hardware DMA bug or the
 * budget runs out, in which case the caller must fall back to the
 * hwbug workaround path.
 */
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
			    u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	bool hwbug = false;

	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
		hwbug = true;

	if (tg3_4g_overflow_test(map, len))
		hwbug = true;

	if (tg3_40bit_overflow_test(tp, map, len))
		hwbug = true;

	if (tp->dma_limit) {
		u32 prvidx = *entry;
		/* Intermediate chunks must not carry TXD_FLAG_END. */
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8byte DMA problem */
			if (len <= 8) {
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;
			}

			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*budget -= 1;
			/* Remember the previous slot so its fragmented
			 * mark can be undone if we run out of budget.
			 */
			prvidx = *entry;
			*entry = NEXT_TX(*entry);

			map += frag_len;
		}

		if (len) {
			if (*budget) {
				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
					      len, flags, mss, vlan);
				*budget -= 1;
				*entry = NEXT_TX(*entry);
			} else {
				hwbug = true;
				tnapi->tx_buffers[prvidx].fragmented = false;
			}
		}
	} else {
		/* No splitting required: one descriptor per segment. */
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
	}

	return hwbug;
}
6657
/* Unmap the skb queued starting at ring index @entry and clear its
 * buffer slot.  @last is the index of the final page fragment to
 * unmap (-1 when only the linear head was queued).  Extra descriptor
 * slots inserted by tg3_tx_frag_set() (marked fragmented) are skipped
 * and unmarked along the way.  The skb itself is NOT freed here.
 */
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
	int i;
	struct sk_buff *skb;
	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

	skb = txb->skb;
	txb->skb = NULL;

	/* Linear head first. */
	pci_unmap_single(tnapi->tp->pdev,
			 dma_unmap_addr(txb, mapping),
			 skb_headlen(skb),
			 PCI_DMA_TODEVICE);

	while (txb->fragmented) {
		txb->fragmented = false;
		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];
	}

	for (i = 0; i <= last; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		entry = NEXT_TX(entry);
		txb = &tnapi->tx_buffers[entry];

		pci_unmap_page(tnapi->tp->pdev,
			       dma_unmap_addr(txb, mapping),
			       skb_frag_size(frag), PCI_DMA_TODEVICE);

		while (txb->fragmented) {
			txb->fragmented = false;
			entry = NEXT_TX(entry);
			txb = &tnapi->tx_buffers[entry];
		}
	}
}
6695
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Copies the offending skb into a freshly-allocated linear skb (on
 * 5701, with extra headroom so the data is 4-byte aligned), maps it,
 * and queues it at *entry.  On success *pskb points at the new skb
 * and the original is freed.  Returns 0 on success, -1 on allocation
 * or mapping failure (original skb is still freed; *pskb may be NULL).
 */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
				       struct sk_buff **pskb,
				       u32 *entry, u32 *budget,
				       u32 base_flags, u32 mss, u32 vlan)
{
	struct tg3 *tp = tnapi->tp;
	struct sk_buff *new_skb, *skb = *pskb;
	dma_addr_t new_addr = 0;
	int ret = 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
					  PCI_DMA_TODEVICE);
		/* Make sure the mapping succeeded */
		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
			dev_kfree_skb(new_skb);
			ret = -1;
		} else {
			u32 save_entry = *entry;

			base_flags |= TXD_FLAG_END;

			tnapi->tx_buffers[*entry].skb = new_skb;
			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
					   mapping, new_addr);

			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
					    new_skb->len, base_flags,
					    mss, vlan)) {
				/* Even the copy hit a hwbug or ran out
				 * of budget; unwind and give up.
				 */
				tg3_tx_skb_unmap(tnapi, save_entry, -1);
				dev_kfree_skb(new_skb);
				ret = -1;
			}
		}
	}

	dev_kfree_skb(skb);
	*pskb = new_skb;
	return ret;
}
6750
6751 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6752
/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 *
 * Software-segments the skb and transmits each resulting packet via
 * tg3_start_xmit().  The original skb is always freed on exit.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;
	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
		netif_stop_queue(tp->dev);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto tg3_tso_bug_end;

	/* Transmit each segment as an independent packet. */
	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
6793
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
 */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss, vlan = 0;
	u32 budget;
	int i = -1, would_hit_hwbug;
	dma_addr_t mapping;
	struct tg3_napi *tnapi;
	struct netdev_queue *txq;
	unsigned int last;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
	if (tg3_flag(tp, ENABLE_TSS))
		tnapi++;

	budget = tg3_tx_avail(tnapi);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev,
				   "BUG! Tx Ring full when queue awake!\n");
		}
		return NETDEV_TX_BUSY;
	}

	entry = tnapi->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		/* TSO path: fix up the IP/TCP headers and encode the
		 * header length / option info into mss and base_flags
		 * as required by the hardware TSO generation in use.
		 */
		struct iphdr *iph;
		u32 tcp_opt_len, hdr_len;

		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		iph = ip_hdr(skb);
		tcp_opt_len = tcp_optlen(skb);

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;

		if (!skb_is_gso_v6(skb)) {
			iph->check = 0;
			iph->tot_len = htons(mss + hdr_len);
		}

		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
		    tg3_flag(tp, TSO_BUG))
			return tg3_tso_bug(tp, skb);

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}

	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
		base_flags |= TXD_FLAG_JMB_PKT;

	if (vlan_tx_tag_present(skb)) {
		base_flags |= TXD_FLAG_VLAN;
		vlan = vlan_tx_tag_get(skb);
	}

	len = skb_headlen(skb);

	/* Map and queue the linear head. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping))
		goto drop;


	tnapi->tx_buffers[entry].skb = skb;
	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_flag(tp, 5701_DMA_BUG))
		would_hit_hwbug = 1;

	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
			    mss, vlan)) {
		would_hit_hwbug = 1;
	} else if (skb_shinfo(skb)->nr_frags > 0) {
		u32 tmp_mss = mss;

		if (!tg3_flag(tp, HW_TSO_1) &&
		    !tg3_flag(tp, HW_TSO_2) &&
		    !tg3_flag(tp, HW_TSO_3))
			tmp_mss = 0;

		/* Now loop through additional data
		 * fragments, and queue them.
		 */
		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = skb_frag_size(frag);
			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
						   len, DMA_TO_DEVICE);

			tnapi->tx_buffers[entry].skb = NULL;
			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
					   mapping);
			if (dma_mapping_error(&tp->pdev->dev, mapping))
				goto dma_error;

			if (!budget ||
			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
					    len, base_flags |
					    ((i == last) ? TXD_FLAG_END : 0),
					    tmp_mss, vlan)) {
				would_hit_hwbug = 1;
				break;
			}
		}
	}

	if (would_hit_hwbug) {
		/* Unwind whatever we queued and retry via a linearized
		 * copy of the skb.
		 */
		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		entry = tnapi->tx_prod;
		budget = tg3_tx_avail(tnapi);
		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
						base_flags, mss, vlan))
			goto drop_nofree;
	}

	skb_tx_timestamp(skb);
	netdev_sent_queue(tp->dev, skb->len);

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox(tnapi->prodmbox, entry);

	tnapi->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in tg3_tx_avail() below, because in
		 * tg3_tx(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
			netif_tx_wake_queue(txq);
	}

	mmiowb();
	return NETDEV_TX_OK;

dma_error:
	/* Fragment i failed to map; unmap head plus frags 0..i-1. */
	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
	dev_kfree_skb(skb);
drop_nofree:
	tp->tx_dropped++;
	return NETDEV_TX_OK;
}
7007
/* Enable or disable internal MAC loopback, selecting the MII/GMII
 * port mode appropriate for the PHY, and write the result to the
 * MAC_MODE register.
 */
static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
	if (enable) {
		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
				  MAC_MODE_PORT_MODE_MASK);

		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

		if (!tg3_flag(tp, 5705_PLUS))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else {
		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

		if (tg3_flag(tp, 5705_PLUS) ||
		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	}

	tw32(MAC_MODE, tp->mac_mode);
	udelay(40);
}
7035
/* Configure the PHY for loopback at the requested @speed.  When
 * @extlpbk is set, external loopback is configured instead of BMCR
 * internal loopback.  Returns 0 on success, -EIO if external
 * loopback setup fails.
 */
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
	u32 val, bmcr, mac_mode, ptest = 0;

	tg3_phy_toggle_apd(tp, false);
	tg3_phy_toggle_automdix(tp, 0);

	if (extlpbk && tg3_phy_set_extloopbk(tp))
		return -EIO;

	bmcr = BMCR_FULLDPLX;
	switch (speed) {
	case SPEED_10:
		break;
	case SPEED_100:
		bmcr |= BMCR_SPEED100;
		break;
	case SPEED_1000:
	default:
		/* FET PHYs top out at 100Mb; clamp the request. */
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			speed = SPEED_100;
			bmcr |= BMCR_SPEED100;
		} else {
			speed = SPEED_1000;
			bmcr |= BMCR_SPEED1000;
		}
	}

	if (extlpbk) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
			tg3_readphy(tp, MII_CTRL1000, &val);
			val |= CTL1000_AS_MASTER |
			       CTL1000_ENABLE_MASTER;
			tg3_writephy(tp, MII_CTRL1000, val);
		} else {
			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
				MII_TG3_FET_PTEST_TRIM_2;
			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
		}
	} else
		bmcr |= BMCR_LOOPBACK;

	tg3_writephy(tp, MII_BMCR, bmcr);

	/* The write needs to be flushed for the FETs */
	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		tg3_readphy(tp, MII_BMCR, &bmcr);

	udelay(40);

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
			     MII_TG3_FET_PTEST_FRC_TX_LINK |
			     MII_TG3_FET_PTEST_FRC_TX_LOCK);

		/* The write needs to be flushed for the AC131 */
		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
	}

	/* Reset to prevent losing 1st rx packet intermittently */
	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
	    tg3_flag(tp, 5780_CLASS)) {
		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
		udelay(10);
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	mac_mode = tp->mac_mode &
		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	if (speed == SPEED_1000)
		mac_mode |= MAC_MODE_PORT_MODE_GMII;
	else
		mac_mode |= MAC_MODE_PORT_MODE_MII;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

		/* Link polarity handling differs per PHY on 5700. */
		if (masked_phy_id == TG3_PHY_ID_BCM5401)
			mac_mode &= ~MAC_MODE_LINK_POLARITY;
		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
			mac_mode |= MAC_MODE_LINK_POLARITY;

		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
	}

	tw32(MAC_MODE, mac_mode);
	udelay(40);

	return 0;
}
7128
7129 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7130 {
7131         struct tg3 *tp = netdev_priv(dev);
7132
7133         if (features & NETIF_F_LOOPBACK) {
7134                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7135                         return;
7136
7137                 spin_lock_bh(&tp->lock);
7138                 tg3_mac_loopback(tp, true);
7139                 netif_carrier_on(tp->dev);
7140                 spin_unlock_bh(&tp->lock);
7141                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7142         } else {
7143                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7144                         return;
7145
7146                 spin_lock_bh(&tp->lock);
7147                 tg3_mac_loopback(tp, false);
7148                 /* Force link status check */
7149                 tg3_setup_phy(tp, 1);
7150                 spin_unlock_bh(&tp->lock);
7151                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7152         }
7153 }
7154
7155 static netdev_features_t tg3_fix_features(struct net_device *dev,
7156         netdev_features_t features)
7157 {
7158         struct tg3 *tp = netdev_priv(dev);
7159
7160         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7161                 features &= ~NETIF_F_ALL_TSO;
7162
7163         return features;
7164 }
7165
7166 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7167 {
7168         netdev_features_t changed = dev->features ^ features;
7169
7170         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7171                 tg3_set_loopback(dev, features);
7172
7173         return 0;
7174 }
7175
/* Free all rx data buffers currently posted to a producer ring set. */
static void tg3_rx_prodring_free(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	int i;

	/* Rings other than napi[0]'s only hold buffers in the
	 * consumer..producer window, so walk just that span,
	 * wrapping indices with the ring mask.
	 */
	if (tpr != &tp->napi[0].prodring) {
		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
		     i = (i + 1) & tp->rx_std_ring_mask)
			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
					tp->rx_pkt_map_sz);

		if (tg3_flag(tp, JUMBO_CAPABLE)) {
			for (i = tpr->rx_jmb_cons_idx;
			     i != tpr->rx_jmb_prod_idx;
			     i = (i + 1) & tp->rx_jmb_ring_mask) {
				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
						TG3_RX_JMB_MAP_SZ);
			}
		}

		return;
	}

	/* The default ring may hold a buffer at any slot; sweep all of
	 * them.  tg3_rx_data_free() tolerates empty slots.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++)
		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
				tp->rx_pkt_map_sz);

	/* 5780-class chips have no separate jumbo buffer ring (jumbo
	 * frames go through the standard ring), so skip it for them.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
					TG3_RX_JMB_MAP_SZ);
	}
}
7209
7210 /* Initialize rx rings for packet processing.
7211  *
7212  * The chip has been shut down and the driver detached from
7213  * the networking, so no interrupts or new tx packets will
7214  * end up in the driver.  tp->{tx,}lock are held and thus
7215  * we may not sleep.
7216  */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	/* Secondary (per-vector) ring sets only need their shadow
	 * buffer arrays cleared; the descriptor setup below applies
	 * to the default ring only.
	 */
	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	/* 5780-class chips service jumbo MTUs through the standard
	 * ring, so size its DMA buffers for jumbo frames in that case.
	 */
	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			/* Continue with a shrunken ring unless nothing
			 * at all could be allocated.
			 */
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	/* Jumbo ring descriptor invariants, mirroring the standard
	 * ring setup above but with the JUMBO flag and DMA size.
	 */
	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
		       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	/* Unwind any buffers that were posted before the failure. */
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
7312
/* Release everything tg3_rx_prodring_init() allocated for one ring set. */
static void tg3_rx_prodring_fini(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	/* kfree(NULL) is a no-op, so the shadow arrays need no check.
	 * Pointers are NULLed so this is safe to call on a partially
	 * initialized set (see the err_out path of the init function).
	 */
	kfree(tpr->rx_std_buffers);
	tpr->rx_std_buffers = NULL;
	kfree(tpr->rx_jmb_buffers);
	tpr->rx_jmb_buffers = NULL;
	if (tpr->rx_std) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
				  tpr->rx_std, tpr->rx_std_mapping);
		tpr->rx_std = NULL;
	}
	if (tpr->rx_jmb) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
				  tpr->rx_jmb, tpr->rx_jmb_mapping);
		tpr->rx_jmb = NULL;
	}
}
7331
7332 static int tg3_rx_prodring_init(struct tg3 *tp,
7333                                 struct tg3_rx_prodring_set *tpr)
7334 {
7335         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7336                                       GFP_KERNEL);
7337         if (!tpr->rx_std_buffers)
7338                 return -ENOMEM;
7339
7340         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7341                                          TG3_RX_STD_RING_BYTES(tp),
7342                                          &tpr->rx_std_mapping,
7343                                          GFP_KERNEL);
7344         if (!tpr->rx_std)
7345                 goto err_out;
7346
7347         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7348                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7349                                               GFP_KERNEL);
7350                 if (!tpr->rx_jmb_buffers)
7351                         goto err_out;
7352
7353                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7354                                                  TG3_RX_JMB_RING_BYTES(tp),
7355                                                  &tpr->rx_jmb_mapping,
7356                                                  GFP_KERNEL);
7357                 if (!tpr->rx_jmb)
7358                         goto err_out;
7359         }
7360
7361         return 0;
7362
7363 err_out:
7364         tg3_rx_prodring_fini(tp, tpr);
7365         return -ENOMEM;
7366 }
7367
7368 /* Free up pending packets in all rx/tx rings.
7369  *
7370  * The chip has been shut down and the driver detached from
7371  * the networking, so no interrupts or new tx packets will
7372  * end up in the driver.  tp->{tx,}lock is not held and we are not
7373  * in an interrupt context and thus may sleep.
7374  */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		/* Vectors without a tx ring have no tx buffers to reap. */
		if (!tnapi->tx_buffers)
			continue;

		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
			struct sk_buff *skb = tnapi->tx_buffers[i].skb;

			if (!skb)
				continue;

			/* Unmap the skb's DMA mappings; the last index
			 * passed is nr_frags - 1, i.e. the final fragment.
			 */
			tg3_tx_skb_unmap(tnapi, i,
					 skb_shinfo(skb)->nr_frags - 1);

			dev_kfree_skb_any(skb);
		}
	}
	/* Reset the device's byte queue limit accounting. */
	netdev_reset_queue(tp->dev);
}
7401
7402 /* Initialize tx/rx rings for packet processing.
7403  *
7404  * The chip has been shut down and the driver detached from
7405  * the networking, so no interrupts or new tx packets will
7406  * end up in the driver.  tp->{tx,}lock are held and thus
7407  * we may not sleep.
7408  */
7409 static int tg3_init_rings(struct tg3 *tp)
7410 {
7411         int i;
7412
7413         /* Free up all the SKBs. */
7414         tg3_free_rings(tp);
7415
7416         for (i = 0; i < tp->irq_cnt; i++) {
7417                 struct tg3_napi *tnapi = &tp->napi[i];
7418
7419                 tnapi->last_tag = 0;
7420                 tnapi->last_irq_tag = 0;
7421                 tnapi->hw_status->status = 0;
7422                 tnapi->hw_status->status_tag = 0;
7423                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7424
7425                 tnapi->tx_prod = 0;
7426                 tnapi->tx_cons = 0;
7427                 if (tnapi->tx_ring)
7428                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7429
7430                 tnapi->rx_rcb_ptr = 0;
7431                 if (tnapi->rx_rcb)
7432                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7433
7434                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7435                         tg3_free_rings(tp);
7436                         return -ENOMEM;
7437                 }
7438         }
7439
7440         return 0;
7441 }
7442
7443 /*
7444  * Must not be invoked with interrupt sources disabled and
7445  * the hardware shutdown down.
7446  */
static void tg3_free_consistent(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* Per-vector tx descriptor ring, if this vector got one. */
		if (tnapi->tx_ring) {
			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
				tnapi->tx_ring, tnapi->tx_desc_mapping);
			tnapi->tx_ring = NULL;
		}

		/* kfree(NULL) is a no-op, so no check is needed. */
		kfree(tnapi->tx_buffers);
		tnapi->tx_buffers = NULL;

		/* Rx return (completion) ring. */
		if (tnapi->rx_rcb) {
			dma_free_coherent(&tp->pdev->dev,
					  TG3_RX_RCB_RING_BYTES(tp),
					  tnapi->rx_rcb,
					  tnapi->rx_rcb_mapping);
			tnapi->rx_rcb = NULL;
		}

		tg3_rx_prodring_fini(tp, &tnapi->prodring);

		/* Per-vector hardware status block. */
		if (tnapi->hw_status) {
			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
					  tnapi->hw_status,
					  tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
	}

	/* Single statistics block shared by the device. */
	if (tp->hw_stats) {
		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
				  tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
7487
7488 /*
7489  * Must not be invoked with interrupt sources disabled and
7490  * the hardware shutdown down.  Can sleep.
7491  */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	/* Hardware statistics block, DMA-visible to the chip. */
	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
					  sizeof(struct tg3_hw_stats),
					  &tp->stats_mapping,
					  GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		/* Per-vector status block the chip DMAs updates into. */
		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
						      TG3_HW_STATUS_SIZE,
						      &tnapi->status_mapping,
						      GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
		sblk = tnapi->hw_status;

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector TSS is enabled, vector 0 does not handle
		 * tx interrupts.  Don't allocate any resources for it.
		 */
		if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
		    (i && tg3_flag(tp, ENABLE_TSS))) {
			tnapi->tx_buffers = kzalloc(
					       sizeof(struct tg3_tx_ring_info) *
					       TG3_TX_RING_SIZE, GFP_KERNEL);
			if (!tnapi->tx_buffers)
				goto err_out;

			tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
							    TG3_TX_RING_BYTES,
							&tnapi->tx_desc_mapping,
							    GFP_KERNEL);
			if (!tnapi->tx_ring)
				goto err_out;
		}

		/*
		 * When RSS is enabled, the status block format changes
		 * slightly.  The "rx_jumbo_consumer", "reserved",
		 * and "rx_mini_consumer" members get mapped to the
		 * other three rx return ring producer indexes.
		 */
		switch (i) {
		default:
			/* Vectors 0 and 1 use the normal producer index. */
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
			break;
		case 2:
			tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
			break;
		case 3:
			tnapi->rx_rcb_prod_idx = &sblk->reserved;
			break;
		case 4:
			tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
			break;
		}

		/*
		 * If multivector RSS is enabled, vector 0 does not handle
		 * rx or tx interrupts.  Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		/* Rx return ring where the chip reports received frames. */
		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
						   TG3_RX_RCB_RING_BYTES(tp),
						   &tnapi->rx_rcb_mapping,
						   GFP_KERNEL);
		if (!tnapi->rx_rcb)
			goto err_out;

		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	}

	return 0;

err_out:
	/* Partial allocations are all released in one place. */
	tg3_free_consistent(tp);
	return -ENOMEM;
}
7585
/* Iterations for the 100 us poll loops below (~100 ms total). */
#define MAX_WAIT_CNT 1000
7587
7588 /* To stop a block, clear the enable bit and poll till it
7589  * clears.  tp->lock is held.
7590  */
7591 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7592 {
7593         unsigned int i;
7594         u32 val;
7595
7596         if (tg3_flag(tp, 5705_PLUS)) {
7597                 switch (ofs) {
7598                 case RCVLSC_MODE:
7599                 case DMAC_MODE:
7600                 case MBFREE_MODE:
7601                 case BUFMGR_MODE:
7602                 case MEMARB_MODE:
7603                         /* We can't enable/disable these bits of the
7604                          * 5705/5750, just say success.
7605                          */
7606                         return 0;
7607
7608                 default:
7609                         break;
7610                 }
7611         }
7612
7613         val = tr32(ofs);
7614         val &= ~enable_bit;
7615         tw32_f(ofs, val);
7616
7617         for (i = 0; i < MAX_WAIT_CNT; i++) {
7618                 udelay(100);
7619                 val = tr32(ofs);
7620                 if ((val & enable_bit) == 0)
7621                         break;
7622         }
7623
7624         if (i == MAX_WAIT_CNT && !silent) {
7625                 dev_err(&tp->pdev->dev,
7626                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7627                         ofs, enable_bit);
7628                 return -ENODEV;
7629         }
7630
7631         return 0;
7632 }
7633
7634 /* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop accepting frames at the MAC before quiescing the
	 * receive-path blocks below.
	 */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Stop each functional block.  Errors are OR-ed together so a
	 * single timeout makes the whole call report failure.
	 */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* MAC_TX_MODE has no tg3_stop_block() entry, so poll it
	 * by hand until TX_MODE_ENABLE clears.
	 */
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse FTQ_RESET (all ones, then zero) to reset the flow-
	 * through queues.
	 */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Clear all status blocks now that the hardware is quiet. */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	return err;
}
7697
7698 /* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	/* Only PCI_COMMAND is saved here; tg3_restore_pci_state()
	 * rebuilds the rest of config space from cached driver state.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
7703
7704 /* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Restore the command register saved by tg3_save_pci_state(). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	/* Legacy PCI parts need cache line size and latency timer
	 * restored; PCIe devices do not use these registers.
	 */
	if (!tg3_flag(tp, PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
7764
7765 /* tp->lock is held. */
7766 static int tg3_chip_reset(struct tg3 *tp)
7767 {
7768         u32 val;
7769         void (*write_op)(struct tg3 *, u32, u32);
7770         int i, err;
7771
7772         tg3_nvram_lock(tp);
7773
7774         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7775
7776         /* No matching tg3_nvram_unlock() after this because
7777          * chip reset below will undo the nvram lock.
7778          */
7779         tp->nvram_lock_cnt = 0;
7780
7781         /* GRC_MISC_CFG core clock reset will clear the memory
7782          * enable bit in PCI register 4 and the MSI enable bit
7783          * on some chips, so we save relevant registers here.
7784          */
7785         tg3_save_pci_state(tp);
7786
7787         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7788             tg3_flag(tp, 5755_PLUS))
7789                 tw32(GRC_FASTBOOT_PC, 0);
7790
7791         /*
7792          * We must avoid the readl() that normally takes place.
7793          * It locks machines, causes machine checks, and other
7794          * fun things.  So, temporarily disable the 5701
7795          * hardware workaround, while we do the reset.
7796          */
7797         write_op = tp->write32;
7798         if (write_op == tg3_write_flush_reg32)
7799                 tp->write32 = tg3_write32;
7800
7801         /* Prevent the irq handler from reading or writing PCI registers
7802          * during chip reset when the memory enable bit in the PCI command
7803          * register may be cleared.  The chip does not generate interrupt
7804          * at this time, but the irq handler may still be called due to irq
7805          * sharing or irqpoll.
7806          */
7807         tg3_flag_set(tp, CHIP_RESETTING);
7808         for (i = 0; i < tp->irq_cnt; i++) {
7809                 struct tg3_napi *tnapi = &tp->napi[i];
7810                 if (tnapi->hw_status) {
7811                         tnapi->hw_status->status = 0;
7812                         tnapi->hw_status->status_tag = 0;
7813                 }
7814                 tnapi->last_tag = 0;
7815                 tnapi->last_irq_tag = 0;
7816         }
7817         smp_mb();
7818
7819         for (i = 0; i < tp->irq_cnt; i++)
7820                 synchronize_irq(tp->napi[i].irq_vec);
7821
7822         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7823                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7824                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7825         }
7826
7827         /* do the reset */
7828         val = GRC_MISC_CFG_CORECLK_RESET;
7829
7830         if (tg3_flag(tp, PCI_EXPRESS)) {
7831                 /* Force PCIe 1.0a mode */
7832                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7833                     !tg3_flag(tp, 57765_PLUS) &&
7834                     tr32(TG3_PCIE_PHY_TSTCTL) ==
7835                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7836                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7837
7838                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7839                         tw32(GRC_MISC_CFG, (1 << 29));
7840                         val |= (1 << 29);
7841                 }
7842         }
7843
7844         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7845                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7846                 tw32(GRC_VCPU_EXT_CTRL,
7847                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7848         }
7849
7850         /* Manage gphy power for all CPMU absent PCIe devices. */
7851         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7852                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7853
7854         tw32(GRC_MISC_CFG, val);
7855
7856         /* restore 5701 hardware bug workaround write method */
7857         tp->write32 = write_op;
7858
7859         /* Unfortunately, we have to delay before the PCI read back.
7860          * Some 575X chips even will not respond to a PCI cfg access
7861          * when the reset command is given to the chip.
7862          *
7863          * How do these hardware designers expect things to work
7864          * properly if the PCI write is posted for a long period
7865          * of time?  It is always necessary to have some method by
7866          * which a register read back can occur to push the write
7867          * out which does the reset.
7868          *
7869          * For most tg3 variants the trick below was working.
7870          * Ho hum...
7871          */
7872         udelay(120);
7873
7874         /* Flush PCI posted writes.  The normal MMIO registers
7875          * are inaccessible at this time so this is the only
7876          * way to make this reliably (actually, this is no longer
7877          * the case, see above).  I tried to use indirect
7878          * register read/write but this upset some 5701 variants.
7879          */
7880         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7881
7882         udelay(120);
7883
7884         if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7885                 u16 val16;
7886
7887                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7888                         int i;
7889                         u32 cfg_val;
7890
7891                         /* Wait for link training to complete.  */
7892                         for (i = 0; i < 5000; i++)
7893                                 udelay(100);
7894
7895                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7896                         pci_write_config_dword(tp->pdev, 0xc4,
7897                                                cfg_val | (1 << 15));
7898                 }
7899
7900                 /* Clear the "no snoop" and "relaxed ordering" bits. */
7901                 pci_read_config_word(tp->pdev,
7902                                      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7903                                      &val16);
7904                 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7905                            PCI_EXP_DEVCTL_NOSNOOP_EN);
7906                 /*
7907                  * Older PCIe devices only support the 128 byte
7908                  * MPS setting.  Enforce the restriction.
7909                  */
7910                 if (!tg3_flag(tp, CPMU_PRESENT))
7911                         val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7912                 pci_write_config_word(tp->pdev,
7913                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7914                                       val16);
7915
7916                 /* Clear error status */
7917                 pci_write_config_word(tp->pdev,
7918                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7919                                       PCI_EXP_DEVSTA_CED |
7920                                       PCI_EXP_DEVSTA_NFED |
7921                                       PCI_EXP_DEVSTA_FED |
7922                                       PCI_EXP_DEVSTA_URD);
7923         }
7924
7925         tg3_restore_pci_state(tp);
7926
7927         tg3_flag_clear(tp, CHIP_RESETTING);
7928         tg3_flag_clear(tp, ERROR_PROCESSED);
7929
7930         val = 0;
7931         if (tg3_flag(tp, 5780_CLASS))
7932                 val = tr32(MEMARB_MODE);
7933         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7934
7935         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7936                 tg3_stop_fw(tp);
7937                 tw32(0x5000, 0x400);
7938         }
7939
7940         tw32(GRC_MODE, tp->grc_mode);
7941
7942         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7943                 val = tr32(0xc4);
7944
7945                 tw32(0xc4, val | (1 << 15));
7946         }
7947
7948         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7949             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7950                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7951                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7952                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7953                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7954         }
7955
7956         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7957                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7958                 val = tp->mac_mode;
7959         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7960                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7961                 val = tp->mac_mode;
7962         } else
7963                 val = 0;
7964
7965         tw32_f(MAC_MODE, val);
7966         udelay(40);
7967
7968         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7969
7970         err = tg3_poll_fw(tp);
7971         if (err)
7972                 return err;
7973
7974         tg3_mdio_start(tp);
7975
7976         if (tg3_flag(tp, PCI_EXPRESS) &&
7977             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7978             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7979             !tg3_flag(tp, 57765_PLUS)) {
7980                 val = tr32(0x7c00);
7981
7982                 tw32(0x7c00, val | (1 << 25));
7983         }
7984
7985         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7986                 val = tr32(TG3_CPMU_CLCK_ORIDE);
7987                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7988         }
7989
7990         /* Reprobe ASF enable state.  */
7991         tg3_flag_clear(tp, ENABLE_ASF);
7992         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7993         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7994         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7995                 u32 nic_cfg;
7996
7997                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7998                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7999                         tg3_flag_set(tp, ENABLE_ASF);
8000                         tp->last_event_jiffies = jiffies;
8001                         if (tg3_flag(tp, 5750_PLUS))
8002                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8003                 }
8004         }
8005
8006         return 0;
8007 }
8008
8009 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
8010                                                  struct rtnl_link_stats64 *);
8011 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *,
8012                                                 struct tg3_ethtool_stats *);
8013
8014 /* tp->lock is held. */
8015 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8016 {
8017         int err;
8018
8019         tg3_stop_fw(tp);
8020
8021         tg3_write_sig_pre_reset(tp, kind);
8022
8023         tg3_abort_hw(tp, silent);
8024         err = tg3_chip_reset(tp);
8025
8026         __tg3_set_mac_addr(tp, 0);
8027
8028         tg3_write_sig_legacy(tp, kind);
8029         tg3_write_sig_post_reset(tp, kind);
8030
8031         if (tp->hw_stats) {
8032                 /* Save the stats across chip resets... */
8033                 tg3_get_stats64(tp->dev, &tp->net_stats_prev),
8034                 tg3_get_estats(tp, &tp->estats_prev);
8035
8036                 /* And make sure the next sample is new data */
8037                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8038         }
8039
8040         if (err)
8041                 return err;
8042
8043         return 0;
8044 }
8045
8046 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8047 {
8048         struct tg3 *tp = netdev_priv(dev);
8049         struct sockaddr *addr = p;
8050         int err = 0, skip_mac_1 = 0;
8051
8052         if (!is_valid_ether_addr(addr->sa_data))
8053                 return -EINVAL;
8054
8055         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8056
8057         if (!netif_running(dev))
8058                 return 0;
8059
8060         if (tg3_flag(tp, ENABLE_ASF)) {
8061                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8062
8063                 addr0_high = tr32(MAC_ADDR_0_HIGH);
8064                 addr0_low = tr32(MAC_ADDR_0_LOW);
8065                 addr1_high = tr32(MAC_ADDR_1_HIGH);
8066                 addr1_low = tr32(MAC_ADDR_1_LOW);
8067
8068                 /* Skip MAC addr 1 if ASF is using it. */
8069                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8070                     !(addr1_high == 0 && addr1_low == 0))
8071                         skip_mac_1 = 1;
8072         }
8073         spin_lock_bh(&tp->lock);
8074         __tg3_set_mac_addr(tp, skip_mac_1);
8075         spin_unlock_bh(&tp->lock);
8076
8077         return err;
8078 }
8079
8080 /* tp->lock is held. */
8081 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8082                            dma_addr_t mapping, u32 maxlen_flags,
8083                            u32 nic_addr)
8084 {
8085         tg3_write_mem(tp,
8086                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8087                       ((u64) mapping >> 32));
8088         tg3_write_mem(tp,
8089                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8090                       ((u64) mapping & 0xffffffff));
8091         tg3_write_mem(tp,
8092                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8093                        maxlen_flags);
8094
8095         if (!tg3_flag(tp, 5705_PLUS))
8096                 tg3_write_mem(tp,
8097                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8098                               nic_addr);
8099 }
8100
/* Program the host coalescing engine from the ethtool parameters in
 * @ec.  When TSS/RSS steer traffic across multiple vectors, the
 * default (vector 0) tx/rx knobs are zeroed and the per-vector
 * register banks (stride 0x18) are programmed instead.
 */
static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
        int i;

        /* Vector 0 tx coalescing, unless TSS moves tx to other vectors. */
        if (!tg3_flag(tp, ENABLE_TSS)) {
                tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
                tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
                tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
        } else {
                tw32(HOSTCC_TXCOL_TICKS, 0);
                tw32(HOSTCC_TXMAX_FRAMES, 0);
                tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
        }

        /* Vector 0 rx coalescing, unless RSS moves rx to other vectors. */
        if (!tg3_flag(tp, ENABLE_RSS)) {
                tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
                tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
                tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
        } else {
                tw32(HOSTCC_RXCOL_TICKS, 0);
                tw32(HOSTCC_RXMAX_FRAMES, 0);
                tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
        }

        /* Pre-5705 parts have extra irq-time knobs and a statistics
         * block timer; the stats timer is stopped while link is down.
         */
        if (!tg3_flag(tp, 5705_PLUS)) {
                u32 val = ec->stats_block_coalesce_usecs;

                tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
                tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);

                if (!netif_carrier_ok(tp->dev))
                        val = 0;

                tw32(HOSTCC_STAT_COAL_TICKS, val);
        }

        /* Program the banks of the additional active vectors. */
        for (i = 0; i < tp->irq_cnt - 1; i++) {
                u32 reg;

                reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
                tw32(reg, ec->rx_coalesce_usecs);
                reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
                tw32(reg, ec->rx_max_coalesced_frames);
                reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
                tw32(reg, ec->rx_max_coalesced_frames_irq);

                if (tg3_flag(tp, ENABLE_TSS)) {
                        reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
                        tw32(reg, ec->tx_coalesce_usecs);
                        reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
                        tw32(reg, ec->tx_max_coalesced_frames);
                        reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
                        tw32(reg, ec->tx_max_coalesced_frames_irq);
                }
        }

        /* Zero the banks of the remaining, unused vectors. */
        for (; i < tp->irq_max - 1; i++) {
                tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
                tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
                tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);

                if (tg3_flag(tp, ENABLE_TSS)) {
                        tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
                        tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
                        tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
                }
        }
}
8169
/* Reset all NIC rings to a known-clean state: disable every tx and rx
 * return ring beyond the first, clear mailboxes and status blocks, then
 * re-program the BDINFO blocks and status-block DMA addresses for each
 * active vector.  tp->lock is held.
 */
static void tg3_rings_reset(struct tg3 *tp)
{
        int i;
        u32 stblk, txrcb, rxrcb, limit;
        struct tg3_napi *tnapi = &tp->napi[0];

        /* Disable all transmit rings but the first.
         * The number of send BDINFO slots in SRAM varies by ASIC family.
         */
        if (!tg3_flag(tp, 5705_PLUS))
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
        else if (tg3_flag(tp, 5717_PLUS))
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
        else if (tg3_flag(tp, 57765_CLASS))
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
        else
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

        for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
             txrcb < limit; txrcb += TG3_BDINFO_SIZE)
                tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
                              BDINFO_FLAGS_DISABLED);


        /* Disable all receive return rings but the first. */
        if (tg3_flag(tp, 5717_PLUS))
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
        else if (!tg3_flag(tp, 5705_PLUS))
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
        else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
                 tg3_flag(tp, 57765_CLASS))
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
        else
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

        for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
             rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
                tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
                              BDINFO_FLAGS_DISABLED);

        /* Disable interrupts */
        tw32_mailbox_f(tp->napi[0].int_mbox, 1);
        tp->napi[0].chk_msi_cnt = 0;
        tp->napi[0].last_rx_cons = 0;
        tp->napi[0].last_tx_cons = 0;

        /* Zero mailbox registers. */
        if (tg3_flag(tp, SUPPORT_MSIX)) {
                for (i = 1; i < tp->irq_max; i++) {
                        tp->napi[i].tx_prod = 0;
                        tp->napi[i].tx_cons = 0;
                        if (tg3_flag(tp, ENABLE_TSS))
                                tw32_mailbox(tp->napi[i].prodmbox, 0);
                        tw32_rx_mbox(tp->napi[i].consmbox, 0);
                        tw32_mailbox_f(tp->napi[i].int_mbox, 1);
                        tp->napi[i].chk_msi_cnt = 0;
                        tp->napi[i].last_rx_cons = 0;
                        tp->napi[i].last_tx_cons = 0;
                }
                /* Without TSS, all tx goes through vector 0's mailbox. */
                if (!tg3_flag(tp, ENABLE_TSS))
                        tw32_mailbox(tp->napi[0].prodmbox, 0);
        } else {
                tp->napi[0].tx_prod = 0;
                tp->napi[0].tx_cons = 0;
                tw32_mailbox(tp->napi[0].prodmbox, 0);
                tw32_rx_mbox(tp->napi[0].consmbox, 0);
        }

        /* Make sure the NIC-based send BD rings are disabled. */
        if (!tg3_flag(tp, 5705_PLUS)) {
                u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
                for (i = 0; i < 16; i++)
                        tw32_tx_mbox(mbox + i * 8, 0);
        }

        txrcb = NIC_SRAM_SEND_RCB;
        rxrcb = NIC_SRAM_RCV_RET_RCB;

        /* Clear status block in ram. */
        memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

        /* Set status block DMA address */
        tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
             ((u64) tnapi->status_mapping >> 32));
        tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
             ((u64) tnapi->status_mapping & 0xffffffff));

        /* Re-enable vector 0's tx and rx-return rings in SRAM. */
        if (tnapi->tx_ring) {
                tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
                               (TG3_TX_RING_SIZE <<
                                BDINFO_FLAGS_MAXLEN_SHIFT),
                               NIC_SRAM_TX_BUFFER_DESC);
                txrcb += TG3_BDINFO_SIZE;
        }

        if (tnapi->rx_rcb) {
                tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
                               (tp->rx_ret_ring_mask + 1) <<
                                BDINFO_FLAGS_MAXLEN_SHIFT, 0);
                rxrcb += TG3_BDINFO_SIZE;
        }

        /* Program status blocks and rings for the extra MSI-X vectors;
         * their status-block address registers are 8 bytes apart.
         */
        stblk = HOSTCC_STATBLCK_RING1;

        for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
                u64 mapping = (u64)tnapi->status_mapping;
                tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
                tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);

                /* Clear status block in ram. */
                memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

                if (tnapi->tx_ring) {
                        tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
                                       (TG3_TX_RING_SIZE <<
                                        BDINFO_FLAGS_MAXLEN_SHIFT),
                                       NIC_SRAM_TX_BUFFER_DESC);
                        txrcb += TG3_BDINFO_SIZE;
                }

                tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
                               ((tp->rx_ret_ring_mask + 1) <<
                                BDINFO_FLAGS_MAXLEN_SHIFT), 0);

                stblk += 8;
                rxrcb += TG3_BDINFO_SIZE;
        }
}
8297
8298 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8299 {
8300         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8301
8302         if (!tg3_flag(tp, 5750_PLUS) ||
8303             tg3_flag(tp, 5780_CLASS) ||
8304             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8305             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8306             tg3_flag(tp, 57765_PLUS))
8307                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8308         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8309                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8310                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8311         else
8312                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8313
8314         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8315         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8316
8317         val = min(nic_rep_thresh, host_rep_thresh);
8318         tw32(RCVBDI_STD_THRESH, val);
8319
8320         if (tg3_flag(tp, 57765_PLUS))
8321                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8322
8323         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8324                 return;
8325
8326         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8327
8328         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8329
8330         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8331         tw32(RCVBDI_JUMBO_THRESH, val);
8332
8333         if (tg3_flag(tp, 57765_PLUS))
8334                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8335 }
8336
/* Bitwise CRC-32 (reflected, polynomial 0xedb88320, init and final
 * XOR 0xffffffff) over @len bytes of @buf — the standard IEEE 802.3
 * ethernet CRC, used here to build the multicast hash filter.
 */
static inline u32 calc_crc(unsigned char *buf, int len)
{
        u32 crc = 0xffffffff;
        int i, bit;

        for (i = 0; i < len; i++) {
                crc ^= buf[i];
                for (bit = 0; bit < 8; bit++)
                        crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
        }

        return ~crc;
}
8360
8361 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8362 {
8363         /* accept or reject all multicast frames */
8364         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8365         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8366         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8367         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8368 }
8369
/* Program MAC_RX_MODE (promiscuity, VLAN tag stripping) and the
 * multicast hash filter from dev->flags and the device multicast list.
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        u32 rx_mode;

        rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
                                  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
        /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
         * flag clear.
         */
        if (!tg3_flag(tp, ENABLE_ASF))
                rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

        if (dev->flags & IFF_PROMISC) {
                /* Promiscuous mode. */
                rx_mode |= RX_MODE_PROMISC;
        } else if (dev->flags & IFF_ALLMULTI) {
                /* Accept all multicast. */
                tg3_set_multi(tp, 1);
        } else if (netdev_mc_empty(dev)) {
                /* Reject all multicast. */
                tg3_set_multi(tp, 0);
        } else {
                /* Accept one or more multicast(s). */
                struct netdev_hw_addr *ha;
                u32 mc_filter[4] = { 0, };
                u32 regidx;
                u32 bit;
                u32 crc;

                /* Hash = low 7 bits of the complemented address CRC;
                 * bits 6:5 select one of the four 32-bit hash words,
                 * bits 4:0 the bit position within it.
                 */
                netdev_for_each_mc_addr(ha, dev) {
                        crc = calc_crc(ha->addr, ETH_ALEN);
                        bit = ~crc & 0x7f;
                        regidx = (bit & 0x60) >> 5;
                        bit &= 0x1f;
                        mc_filter[regidx] |= (1 << bit);
                }

                tw32(MAC_HASH_REG_0, mc_filter[0]);
                tw32(MAC_HASH_REG_1, mc_filter[1]);
                tw32(MAC_HASH_REG_2, mc_filter[2]);
                tw32(MAC_HASH_REG_3, mc_filter[3]);
        }

        /* Only touch the RX mode register when something changed. */
        if (rx_mode != tp->rx_mode) {
                tp->rx_mode = rx_mode;
                tw32_f(MAC_RX_MODE, rx_mode);
                udelay(10);
        }
}
8423
8424 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
8425 {
8426         int i;
8427
8428         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8429                 tp->rss_ind_tbl[i] =
8430                         ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
8431 }
8432
8433 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8434 {
8435         int i;
8436
8437         if (!tg3_flag(tp, SUPPORT_MSIX))
8438                 return;
8439
8440         if (tp->irq_cnt <= 2) {
8441                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8442                 return;
8443         }
8444
8445         /* Validate table against current IRQ count */
8446         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8447                 if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
8448                         break;
8449         }
8450
8451         if (i != TG3_RSS_INDIR_TBL_SIZE)
8452                 tg3_rss_init_dflt_indir_tbl(tp);
8453 }
8454
/* Copy tp->rss_ind_tbl into the chip's RSS indirection registers.
 * Entries are 4 bits wide and packed eight per 32-bit register, with
 * the first entry of each group in the most-significant nibble.
 */
static void tg3_rss_write_indir_tbl(struct tg3 *tp)
{
        int i = 0;
        u32 reg = MAC_RSS_INDIR_TBL_0;

        while (i < TG3_RSS_INDIR_TBL_SIZE) {
                u32 val = tp->rss_ind_tbl[i];
                i++;
                /* Pack the next seven entries below the first one. */
                for (; i % 8; i++) {
                        val <<= 4;
                        val |= tp->rss_ind_tbl[i];
                }
                tw32(reg, val);
                reg += 4;
        }
}
8471
8472 /* tp->lock is held. */
8473 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8474 {
8475         u32 val, rdmac_mode;
8476         int i, err, limit;
8477         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8478
8479         tg3_disable_ints(tp);
8480
8481         tg3_stop_fw(tp);
8482
8483         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8484
8485         if (tg3_flag(tp, INIT_COMPLETE))
8486                 tg3_abort_hw(tp, 1);
8487
8488         /* Enable MAC control of LPI */
8489         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8490                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8491                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8492                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
8493
8494                 tw32_f(TG3_CPMU_EEE_CTRL,
8495                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8496
8497                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8498                       TG3_CPMU_EEEMD_LPI_IN_TX |
8499                       TG3_CPMU_EEEMD_LPI_IN_RX |
8500                       TG3_CPMU_EEEMD_EEE_ENABLE;
8501
8502                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8503                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8504
8505                 if (tg3_flag(tp, ENABLE_APE))
8506                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8507
8508                 tw32_f(TG3_CPMU_EEE_MODE, val);
8509
8510                 tw32_f(TG3_CPMU_EEE_DBTMR1,
8511                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8512                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8513
8514                 tw32_f(TG3_CPMU_EEE_DBTMR2,
8515                        TG3_CPMU_DBTMR2_APE_TX_2047US |
8516                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8517         }
8518
8519         if (reset_phy)
8520                 tg3_phy_reset(tp);
8521
8522         err = tg3_chip_reset(tp);
8523         if (err)
8524                 return err;
8525
8526         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8527
8528         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8529                 val = tr32(TG3_CPMU_CTRL);
8530                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8531                 tw32(TG3_CPMU_CTRL, val);
8532
8533                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8534                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8535                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8536                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8537
8538                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8539                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8540                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8541                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8542
8543                 val = tr32(TG3_CPMU_HST_ACC);
8544                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8545                 val |= CPMU_HST_ACC_MACCLK_6_25;
8546                 tw32(TG3_CPMU_HST_ACC, val);
8547         }
8548
8549         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8550                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8551                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8552                        PCIE_PWR_MGMT_L1_THRESH_4MS;
8553                 tw32(PCIE_PWR_MGMT_THRESH, val);
8554
8555                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8556                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8557
8558                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8559
8560                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8561                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8562         }
8563
8564         if (tg3_flag(tp, L1PLLPD_EN)) {
8565                 u32 grc_mode = tr32(GRC_MODE);
8566
8567                 /* Access the lower 1K of PL PCIE block registers. */
8568                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8569                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8570
8571                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8572                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8573                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8574
8575                 tw32(GRC_MODE, grc_mode);
8576         }
8577
8578         if (tg3_flag(tp, 57765_CLASS)) {
8579                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8580                         u32 grc_mode = tr32(GRC_MODE);
8581
8582                         /* Access the lower 1K of PL PCIE block registers. */
8583                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8584                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8585
8586                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8587                                    TG3_PCIE_PL_LO_PHYCTL5);
8588                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8589                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8590
8591                         tw32(GRC_MODE, grc_mode);
8592                 }
8593
8594                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8595                         u32 grc_mode = tr32(GRC_MODE);
8596
8597                         /* Access the lower 1K of DL PCIE block registers. */
8598                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8599                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8600
8601                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8602                                    TG3_PCIE_DL_LO_FTSMAX);
8603                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8604                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8605                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8606
8607                         tw32(GRC_MODE, grc_mode);
8608                 }
8609
8610                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8611                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8612                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8613                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8614         }
8615
8616         /* This works around an issue with Athlon chipsets on
8617          * B3 tigon3 silicon.  This bit has no effect on any
8618          * other revision.  But do not set this on PCI Express
8619          * chips and don't even touch the clocks if the CPMU is present.
8620          */
8621         if (!tg3_flag(tp, CPMU_PRESENT)) {
8622                 if (!tg3_flag(tp, PCI_EXPRESS))
8623                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8624                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8625         }
8626
8627         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8628             tg3_flag(tp, PCIX_MODE)) {
8629                 val = tr32(TG3PCI_PCISTATE);
8630                 val |= PCISTATE_RETRY_SAME_DMA;
8631                 tw32(TG3PCI_PCISTATE, val);
8632         }
8633
8634         if (tg3_flag(tp, ENABLE_APE)) {
8635                 /* Allow reads and writes to the
8636                  * APE register and memory space.
8637                  */
8638                 val = tr32(TG3PCI_PCISTATE);
8639                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8640                        PCISTATE_ALLOW_APE_SHMEM_WR |
8641                        PCISTATE_ALLOW_APE_PSPACE_WR;
8642                 tw32(TG3PCI_PCISTATE, val);
8643         }
8644
8645         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8646                 /* Enable some hw fixes.  */
8647                 val = tr32(TG3PCI_MSI_DATA);
8648                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8649                 tw32(TG3PCI_MSI_DATA, val);
8650         }
8651
8652         /* Descriptor ring init may make accesses to the
8653          * NIC SRAM area to setup the TX descriptors, so we
8654          * can only do this after the hardware has been
8655          * successfully reset.
8656          */
8657         err = tg3_init_rings(tp);
8658         if (err)
8659                 return err;
8660
8661         if (tg3_flag(tp, 57765_PLUS)) {
8662                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8663                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8664                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8665                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8666                 if (!tg3_flag(tp, 57765_CLASS) &&
8667                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8668                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8669                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8670         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8671                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8672                 /* This value is determined during the probe time DMA
8673                  * engine test, tg3_test_dma.
8674                  */
8675                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8676         }
8677
8678         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8679                           GRC_MODE_4X_NIC_SEND_RINGS |
8680                           GRC_MODE_NO_TX_PHDR_CSUM |
8681                           GRC_MODE_NO_RX_PHDR_CSUM);
8682         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8683
8684         /* Pseudo-header checksum is done by hardware logic and not
8685          * the offload processers, so make the chip do the pseudo-
8686          * header checksums on receive.  For transmit it is more
8687          * convenient to do the pseudo-header checksum in software
8688          * as Linux does that on transmit for us in all cases.
8689          */
8690         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8691
8692         tw32(GRC_MODE,
8693              tp->grc_mode |
8694              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8695
8696         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
8697         val = tr32(GRC_MISC_CFG);
8698         val &= ~0xff;
8699         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8700         tw32(GRC_MISC_CFG, val);
8701
8702         /* Initialize MBUF/DESC pool. */
8703         if (tg3_flag(tp, 5750_PLUS)) {
8704                 /* Do nothing.  */
8705         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8706                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8707                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8708                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8709                 else
8710                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8711                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8712                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8713         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8714                 int fw_len;
8715
8716                 fw_len = tp->fw_len;
8717                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8718                 tw32(BUFMGR_MB_POOL_ADDR,
8719                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8720                 tw32(BUFMGR_MB_POOL_SIZE,
8721                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8722         }
8723
8724         if (tp->dev->mtu <= ETH_DATA_LEN) {
8725                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8726                      tp->bufmgr_config.mbuf_read_dma_low_water);
8727                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8728                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8729                 tw32(BUFMGR_MB_HIGH_WATER,
8730                      tp->bufmgr_config.mbuf_high_water);
8731         } else {
8732                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8733                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8734                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8735                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8736                 tw32(BUFMGR_MB_HIGH_WATER,
8737                      tp->bufmgr_config.mbuf_high_water_jumbo);
8738         }
8739         tw32(BUFMGR_DMA_LOW_WATER,
8740              tp->bufmgr_config.dma_low_water);
8741         tw32(BUFMGR_DMA_HIGH_WATER,
8742              tp->bufmgr_config.dma_high_water);
8743
8744         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8745         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8746                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8747         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8748             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8749             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8750                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8751         tw32(BUFMGR_MODE, val);
8752         for (i = 0; i < 2000; i++) {
8753                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8754                         break;
8755                 udelay(10);
8756         }
8757         if (i >= 2000) {
8758                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8759                 return -ENODEV;
8760         }
8761
8762         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8763                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8764
8765         tg3_setup_rxbd_thresholds(tp);
8766
8767         /* Initialize TG3_BDINFO's at:
8768          *  RCVDBDI_STD_BD:     standard eth size rx ring
8769          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8770          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8771          *
8772          * like so:
8773          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8774          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8775          *                              ring attribute flags
8776          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
8777          *
8778          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8779          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8780          *
8781          * The size of each ring is fixed in the firmware, but the location is
8782          * configurable.
8783          */
8784         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8785              ((u64) tpr->rx_std_mapping >> 32));
8786         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8787              ((u64) tpr->rx_std_mapping & 0xffffffff));
8788         if (!tg3_flag(tp, 5717_PLUS))
8789                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8790                      NIC_SRAM_RX_BUFFER_DESC);
8791
8792         /* Disable the mini ring */
8793         if (!tg3_flag(tp, 5705_PLUS))
8794                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8795                      BDINFO_FLAGS_DISABLED);
8796
8797         /* Program the jumbo buffer descriptor ring control
8798          * blocks on those devices that have them.
8799          */
8800         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8801             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8802
8803                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8804                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8805                              ((u64) tpr->rx_jmb_mapping >> 32));
8806                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8807                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8808                         val = TG3_RX_JMB_RING_SIZE(tp) <<
8809                               BDINFO_FLAGS_MAXLEN_SHIFT;
8810                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8811                              val | BDINFO_FLAGS_USE_EXT_RECV);
8812                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8813                             tg3_flag(tp, 57765_CLASS))
8814                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8815                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8816                 } else {
8817                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8818                              BDINFO_FLAGS_DISABLED);
8819                 }
8820
8821                 if (tg3_flag(tp, 57765_PLUS)) {
8822                         val = TG3_RX_STD_RING_SIZE(tp);
8823                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8824                         val |= (TG3_RX_STD_DMA_SZ << 2);
8825                 } else
8826                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8827         } else
8828                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8829
8830         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8831
8832         tpr->rx_std_prod_idx = tp->rx_pending;
8833         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8834
8835         tpr->rx_jmb_prod_idx =
8836                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8837         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8838
8839         tg3_rings_reset(tp);
8840
8841         /* Initialize MAC address and backoff seed. */
8842         __tg3_set_mac_addr(tp, 0);
8843
8844         /* MTU + ethernet header + FCS + optional VLAN tag */
8845         tw32(MAC_RX_MTU_SIZE,
8846              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8847
8848         /* The slot time is changed by tg3_setup_phy if we
8849          * run at gigabit with half duplex.
8850          */
8851         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8852               (6 << TX_LENGTHS_IPG_SHIFT) |
8853               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8854
8855         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8856                 val |= tr32(MAC_TX_LENGTHS) &
8857                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
8858                         TX_LENGTHS_CNT_DWN_VAL_MSK);
8859
8860         tw32(MAC_TX_LENGTHS, val);
8861
8862         /* Receive rules. */
8863         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8864         tw32(RCVLPC_CONFIG, 0x0181);
8865
8866         /* Calculate RDMAC_MODE setting early, we need it to determine
8867          * the RCVLPC_STATE_ENABLE mask.
8868          */
8869         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8870                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8871                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8872                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8873                       RDMAC_MODE_LNGREAD_ENAB);
8874
8875         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8876                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8877
8878         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8879             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8880             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8881                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8882                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8883                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8884
8885         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8886             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8887                 if (tg3_flag(tp, TSO_CAPABLE) &&
8888                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8889                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8890                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8891                            !tg3_flag(tp, IS_5788)) {
8892                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8893                 }
8894         }
8895
8896         if (tg3_flag(tp, PCI_EXPRESS))
8897                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8898
8899         if (tg3_flag(tp, HW_TSO_1) ||
8900             tg3_flag(tp, HW_TSO_2) ||
8901             tg3_flag(tp, HW_TSO_3))
8902                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8903
8904         if (tg3_flag(tp, 57765_PLUS) ||
8905             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8906             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8907                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8908
8909         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8910                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8911
8912         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8913             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8914             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8915             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8916             tg3_flag(tp, 57765_PLUS)) {
8917                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8918                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8919                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8920                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8921                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8922                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8923                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8924                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8925                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8926                 }
8927                 tw32(TG3_RDMA_RSRVCTRL_REG,
8928                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8929         }
8930
8931         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8932             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8933                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8934                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8935                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8936                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8937         }
8938
8939         /* Receive/send statistics. */
8940         if (tg3_flag(tp, 5750_PLUS)) {
8941                 val = tr32(RCVLPC_STATS_ENABLE);
8942                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8943                 tw32(RCVLPC_STATS_ENABLE, val);
8944         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8945                    tg3_flag(tp, TSO_CAPABLE)) {
8946                 val = tr32(RCVLPC_STATS_ENABLE);
8947                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8948                 tw32(RCVLPC_STATS_ENABLE, val);
8949         } else {
8950                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8951         }
8952         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8953         tw32(SNDDATAI_STATSENAB, 0xffffff);
8954         tw32(SNDDATAI_STATSCTRL,
8955              (SNDDATAI_SCTRL_ENABLE |
8956               SNDDATAI_SCTRL_FASTUPD));
8957
8958         /* Setup host coalescing engine. */
8959         tw32(HOSTCC_MODE, 0);
8960         for (i = 0; i < 2000; i++) {
8961                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8962                         break;
8963                 udelay(10);
8964         }
8965
8966         __tg3_set_coalesce(tp, &tp->coal);
8967
8968         if (!tg3_flag(tp, 5705_PLUS)) {
8969                 /* Status/statistics block address.  See tg3_timer,
8970                  * the tg3_periodic_fetch_stats call there, and
8971                  * tg3_get_stats to see how this works for 5705/5750 chips.
8972                  */
8973                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8974                      ((u64) tp->stats_mapping >> 32));
8975                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8976                      ((u64) tp->stats_mapping & 0xffffffff));
8977                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8978
8979                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8980
8981                 /* Clear statistics and status block memory areas */
8982                 for (i = NIC_SRAM_STATS_BLK;
8983                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8984                      i += sizeof(u32)) {
8985                         tg3_write_mem(tp, i, 0);
8986                         udelay(40);
8987                 }
8988         }
8989
8990         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8991
8992         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8993         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8994         if (!tg3_flag(tp, 5705_PLUS))
8995                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8996
8997         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8998                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8999                 /* reset to prevent losing 1st rx packet intermittently */
9000                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9001                 udelay(10);
9002         }
9003
9004         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9005                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9006                         MAC_MODE_FHDE_ENABLE;
9007         if (tg3_flag(tp, ENABLE_APE))
9008                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9009         if (!tg3_flag(tp, 5705_PLUS) &&
9010             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9011             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9012                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9013         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9014         udelay(40);
9015
9016         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9017          * If TG3_FLAG_IS_NIC is zero, we should read the
9018          * register to preserve the GPIO settings for LOMs. The GPIOs,
9019          * whether used as inputs or outputs, are set by boot code after
9020          * reset.
9021          */
9022         if (!tg3_flag(tp, IS_NIC)) {
9023                 u32 gpio_mask;
9024
9025                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9026                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9027                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9028
9029                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9030                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9031                                      GRC_LCLCTRL_GPIO_OUTPUT3;
9032
9033                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9034                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9035
9036                 tp->grc_local_ctrl &= ~gpio_mask;
9037                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9038
9039                 /* GPIO1 must be driven high for eeprom write protect */
9040                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9041                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9042                                                GRC_LCLCTRL_GPIO_OUTPUT1);
9043         }
9044         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9045         udelay(100);
9046
9047         if (tg3_flag(tp, USING_MSIX)) {
9048                 val = tr32(MSGINT_MODE);
9049                 val |= MSGINT_MODE_ENABLE;
9050                 if (tp->irq_cnt > 1)
9051                         val |= MSGINT_MODE_MULTIVEC_EN;
9052                 if (!tg3_flag(tp, 1SHOT_MSI))
9053                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9054                 tw32(MSGINT_MODE, val);
9055         }
9056
9057         if (!tg3_flag(tp, 5705_PLUS)) {
9058                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9059                 udelay(40);
9060         }
9061
9062         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9063                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9064                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9065                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9066                WDMAC_MODE_LNGREAD_ENAB);
9067
9068         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9069             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9070                 if (tg3_flag(tp, TSO_CAPABLE) &&
9071                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9072                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9073                         /* nothing */
9074                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9075                            !tg3_flag(tp, IS_5788)) {
9076                         val |= WDMAC_MODE_RX_ACCEL;
9077                 }
9078         }
9079
9080         /* Enable host coalescing bug fix */
9081         if (tg3_flag(tp, 5755_PLUS))
9082                 val |= WDMAC_MODE_STATUS_TAG_FIX;
9083
9084         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9085                 val |= WDMAC_MODE_BURST_ALL_DATA;
9086
9087         tw32_f(WDMAC_MODE, val);
9088         udelay(40);
9089
9090         if (tg3_flag(tp, PCIX_MODE)) {
9091                 u16 pcix_cmd;
9092
9093                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9094                                      &pcix_cmd);
9095                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9096                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9097                         pcix_cmd |= PCI_X_CMD_READ_2K;
9098                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9099                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9100                         pcix_cmd |= PCI_X_CMD_READ_2K;
9101                 }
9102                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9103                                       pcix_cmd);
9104         }
9105
9106         tw32_f(RDMAC_MODE, rdmac_mode);
9107         udelay(40);
9108
9109         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9110         if (!tg3_flag(tp, 5705_PLUS))
9111                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9112
9113         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9114                 tw32(SNDDATAC_MODE,
9115                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9116         else
9117                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9118
9119         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9120         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9121         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9122         if (tg3_flag(tp, LRG_PROD_RING_CAP))
9123                 val |= RCVDBDI_MODE_LRG_RING_SZ;
9124         tw32(RCVDBDI_MODE, val);
9125         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9126         if (tg3_flag(tp, HW_TSO_1) ||
9127             tg3_flag(tp, HW_TSO_2) ||
9128             tg3_flag(tp, HW_TSO_3))
9129                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9130         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9131         if (tg3_flag(tp, ENABLE_TSS))
9132                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9133         tw32(SNDBDI_MODE, val);
9134         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9135
9136         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9137                 err = tg3_load_5701_a0_firmware_fix(tp);
9138                 if (err)
9139                         return err;
9140         }
9141
9142         if (tg3_flag(tp, TSO_CAPABLE)) {
9143                 err = tg3_load_tso_firmware(tp);
9144                 if (err)
9145                         return err;
9146         }
9147
9148         tp->tx_mode = TX_MODE_ENABLE;
9149
9150         if (tg3_flag(tp, 5755_PLUS) ||
9151             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9152                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9153
9154         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9155                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9156                 tp->tx_mode &= ~val;
9157                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9158         }
9159
9160         tw32_f(MAC_TX_MODE, tp->tx_mode);
9161         udelay(100);
9162
9163         if (tg3_flag(tp, ENABLE_RSS)) {
9164                 tg3_rss_write_indir_tbl(tp);
9165
9166                 /* Setup the "secret" hash key. */
9167                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9168                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9169                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9170                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9171                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9172                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9173                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9174                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9175                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9176                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9177         }
9178
9179         tp->rx_mode = RX_MODE_ENABLE;
9180         if (tg3_flag(tp, 5755_PLUS))
9181                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9182
9183         if (tg3_flag(tp, ENABLE_RSS))
9184                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9185                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
9186                                RX_MODE_RSS_IPV6_HASH_EN |
9187                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
9188                                RX_MODE_RSS_IPV4_HASH_EN |
9189                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
9190
9191         tw32_f(MAC_RX_MODE, tp->rx_mode);
9192         udelay(10);
9193
9194         tw32(MAC_LED_CTRL, tp->led_ctrl);
9195
9196         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9197         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9198                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9199                 udelay(10);
9200         }
9201         tw32_f(MAC_RX_MODE, tp->rx_mode);
9202         udelay(10);
9203
9204         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9205                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9206                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9207                         /* Set drive transmission level to 1.2V  */
9208                         /* only if the signal pre-emphasis bit is not set  */
9209                         val = tr32(MAC_SERDES_CFG);
9210                         val &= 0xfffff000;
9211                         val |= 0x880;
9212                         tw32(MAC_SERDES_CFG, val);
9213                 }
9214                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9215                         tw32(MAC_SERDES_CFG, 0x616000);
9216         }
9217
9218         /* Prevent chip from dropping frames when flow control
9219          * is enabled.
9220          */
9221         if (tg3_flag(tp, 57765_CLASS))
9222                 val = 1;
9223         else
9224                 val = 2;
9225         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9226
9227         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9228             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9229                 /* Use hardware link auto-negotiation */
9230                 tg3_flag_set(tp, HW_AUTONEG);
9231         }
9232
9233         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9234             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9235                 u32 tmp;
9236
9237                 tmp = tr32(SERDES_RX_CTRL);
9238                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9239                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9240                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9241                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9242         }
9243
9244         if (!tg3_flag(tp, USE_PHYLIB)) {
9245                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
9246                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9247                         tp->link_config.speed = tp->link_config.orig_speed;
9248                         tp->link_config.duplex = tp->link_config.orig_duplex;
9249                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
9250                 }
9251
9252                 err = tg3_setup_phy(tp, 0);
9253                 if (err)
9254                         return err;
9255
9256                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9257                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9258                         u32 tmp;
9259
9260                         /* Clear CRC stats. */
9261                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9262                                 tg3_writephy(tp, MII_TG3_TEST1,
9263                                              tmp | MII_TG3_TEST1_CRC_EN);
9264                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9265                         }
9266                 }
9267         }
9268
9269         __tg3_set_rx_mode(tp->dev);
9270
9271         /* Initialize receive rules. */
9272         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9273         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9274         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9275         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9276
9277         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9278                 limit = 8;
9279         else
9280                 limit = 16;
9281         if (tg3_flag(tp, ENABLE_ASF))
9282                 limit -= 4;
9283         switch (limit) {
9284         case 16:
9285                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9286         case 15:
9287                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9288         case 14:
9289                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9290         case 13:
9291                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9292         case 12:
9293                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9294         case 11:
9295                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9296         case 10:
9297                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9298         case 9:
9299                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9300         case 8:
9301                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9302         case 7:
9303                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9304         case 6:
9305                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9306         case 5:
9307                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9308         case 4:
9309                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9310         case 3:
9311                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9312         case 2:
9313         case 1:
9314
9315         default:
9316                 break;
9317         }
9318
9319         if (tg3_flag(tp, ENABLE_APE))
9320                 /* Write our heartbeat update interval to APE. */
9321                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9322                                 APE_HOST_HEARTBEAT_INT_DISABLE);
9323
9324         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9325
9326         return 0;
9327 }
9328
9329 /* Called at device open time to get the chip ready for
9330  * packet processing.  Invoked with tp->lock held.
9331  */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
	/* Set up chip clocking first (presumably selects the proper core
	 * clock configuration before other registers are touched —
	 * implementation is elsewhere in this file).
	 */
	tg3_switch_clocks(tp);

	/* Reset the PCI memory window base to 0 so indirect SRAM/register
	 * accesses start from a known offset.
	 */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Perform the full hardware bring-up; propagate its status
	 * (0 on success, negative errno on failure) to the caller.
	 */
	return tg3_reset_hw(tp, reset_phy);
}
9340
9341 /* Restart hardware after configuration changes, self-test, etc.
9342  * Invoked with tp->lock held.
9343  */
/* Re-initialize the hardware and, on failure, tear the device down.
 *
 * Must be called with tp->lock held.  On the error path the lock is
 * temporarily dropped (see the sparse __releases/__acquires annotations)
 * because del_timer_sync() and dev_close() may sleep and/or contend on
 * tp->lock; it is re-acquired before returning.
 *
 * Returns 0 on success or the negative errno from tg3_init_hw().
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		/* Quiesce the chip, then drop the lock so the sleeping
		 * teardown calls below can run safely.
		 */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		/* Clear irq_sync and re-enable NAPI so dev_close() finds
		 * the device in a consistent state to shut down.
		 */
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		/* Caller expects tp->lock held on return. */
		tg3_full_lock(tp, 0);
	}
	return err;
}
9364
/* Workqueue handler that performs a full chip reset from process
 * context (scheduled e.g. from the timer or on a detected TX hang).
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		/* Device went down before this work item ran. */
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		return;
	}

	/* The stop calls below may sleep, so drop the lock first. */
	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		/* Recovering from a TX hang: switch to flushed mailbox
		 * writes in case posted writes were being reordered.
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	/* Restart the PHY only if re-init succeeded. */
	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
9408
/* Accumulate a 32-bit hardware counter register into a 64-bit
 * software counter, carrying into .high when .low wraps around.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
9415
/* Fold the chip's 32-bit MAC/receive-list statistics registers into
 * the 64-bit counters in tp->hw_stats.  Called from the service timer.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	/* Statistics are only collected while the link is up. */
	if (!netif_carrier_ok(tp->dev))
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		/* 5717 and the A0 steppings of 5719/5720 don't use the
		 * RCVLPC discard counter; instead each mbuf low-watermark
		 * attention event is counted as one discard and the
		 * attention bit is acked by writing it back.  Presumably
		 * the counter is unreliable on these revs — confirm
		 * against the chip errata.
		 */
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
9470
9471 static void tg3_chk_missed_msi(struct tg3 *tp)
9472 {
9473         u32 i;
9474
9475         for (i = 0; i < tp->irq_cnt; i++) {
9476                 struct tg3_napi *tnapi = &tp->napi[i];
9477
9478                 if (tg3_has_work(tnapi)) {
9479                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9480                             tnapi->last_tx_cons == tnapi->tx_cons) {
9481                                 if (tnapi->chk_msi_cnt < 1) {
9482                                         tnapi->chk_msi_cnt++;
9483                                         return;
9484                                 }
9485                                 tg3_msi(0, tnapi);
9486                         }
9487                 }
9488                 tnapi->chk_msi_cnt = 0;
9489                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9490                 tnapi->last_tx_cons = tnapi->tx_cons;
9491         }
9492 }
9493
/* Periodic service timer: checks for missed MSIs, fakes interrupts for
 * the race-prone non-tagged status protocol, fetches stats, polls link
 * state, and sends the ASF heartbeat.  Reschedules itself at the end.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* Skip this tick entirely while an irq sync or a pending reset
	 * task is in flight.
	 */
	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
		goto restart_timer;

	spin_lock(&tp->lock);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			/* Write DMA engine stopped unexpectedly;
			 * schedule a full chip reset from process
			 * context.
			 */
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			/* Poll MAC status for link changes instead of
			 * relying on link-change interrupts.
			 */
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					/* Bounce the port mode to force
					 * the SERDES link to retrain.
					 */
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
9620
9621 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9622 {
9623         irq_handler_t fn;
9624         unsigned long flags;
9625         char *name;
9626         struct tg3_napi *tnapi = &tp->napi[irq_num];
9627
9628         if (tp->irq_cnt == 1)
9629                 name = tp->dev->name;
9630         else {
9631                 name = &tnapi->irq_lbl[0];
9632                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9633                 name[IFNAMSIZ-1] = 0;
9634         }
9635
9636         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9637                 fn = tg3_msi;
9638                 if (tg3_flag(tp, 1SHOT_MSI))
9639                         fn = tg3_msi_1shot;
9640                 flags = 0;
9641         } else {
9642                 fn = tg3_interrupt;
9643                 if (tg3_flag(tp, TAGGED_STATUS))
9644                         fn = tg3_interrupt_tagged;
9645                 flags = IRQF_SHARED;
9646         }
9647
9648         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9649 }
9650
/* Verify that interrupt delivery works by installing a test ISR on
 * vector 0, forcing the coalescer to fire, and polling for evidence
 * of delivery.  Returns 0 on success, -EIO if no interrupt arrived,
 * or a negative errno on setup failure.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	/* Swap out the normal handler for the minimal test ISR. */
	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	/* NOTE(review): if this request_irq() fails, the handler freed
	 * above is not reinstalled before returning — callers treat
	 * the error as fatal, but confirm this is intended.
	 */
	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Force the host coalescing engine to generate an interrupt now. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	/* Poll up to ~50ms for the interrupt to show up, either in the
	 * interrupt mailbox or via the masked-PCI-int bit.
	 */
	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	/* Restore the normal driver ISR. */
	free_irq(tnapi->irq_vec, tnapi);

	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
9724
9725 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
9726  * successfully restored
9727  */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the saved PCI command word (SERR included). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	/* Free the MSI vector, disable MSI, and re-request the irq on
	 * the legacy INTx line.
	 */
	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	/* On failure, release the irq we just acquired before bailing. */
	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
9785
9786 static int tg3_request_firmware(struct tg3 *tp)
9787 {
9788         const __be32 *fw_data;
9789
9790         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9791                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9792                            tp->fw_needed);
9793                 return -ENOENT;
9794         }
9795
9796         fw_data = (void *)tp->fw->data;
9797
9798         /* Firmware blob starts with version numbers, followed by
9799          * start address and _full_ length including BSS sections
9800          * (which must be longer than the actual data, of course
9801          */
9802
9803         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
9804         if (tp->fw_len < (tp->fw->size - 12)) {
9805                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9806                            tp->fw_len, tp->fw_needed);
9807                 release_firmware(tp->fw);
9808                 tp->fw = NULL;
9809                 return -EINVAL;
9810         }
9811
9812         /* We no longer need firmware; we have it. */
9813         tp->fw_needed = NULL;
9814         return 0;
9815 }
9816
/* Try to enable MSI-X with one vector per online CPU (plus one for
 * link/misc interrupts) and configure the rx/tx queue counts to match.
 * Returns true on success, false if the caller should fall back to
 * MSI or INTx.
 */
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc;
	/* VLA bounded by the chip's maximum vector count. */
	struct msix_entry msix_ent[tp->irq_max];

	tp->irq_cnt = num_online_cpus();
	if (tp->irq_cnt > 1) {
		/* We want as many rx rings enabled as there are cpus.
		 * In multiqueue MSI-X mode, the first MSI-X vector
		 * only deals with link interrupts, etc, so we add
		 * one to the number of vectors we are requesting.
		 */
		tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
	}

	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		/* A positive rc is the number of vectors actually
		 * available; retry with that reduced count.
		 */
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
	}

	/* Copy all irq_max vectors; slots past irq_cnt keep the zero
	 * placeholder and are never requested.
	 */
	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	netif_set_real_num_tx_queues(tp->dev, 1);
	rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
	if (netif_set_real_num_rx_queues(tp->dev, rc)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	if (tp->irq_cnt > 1) {
		tg3_flag_set(tp, ENABLE_RSS);

		/* 5719/5720 additionally support multiple tx queues. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
			tg3_flag_set(tp, ENABLE_TSS);
			netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
		}
	}

	return true;
}
9870
/* Choose and enable the interrupt mode in preference order
 * MSI-X > MSI > INTx, then program MSGINT_MODE accordingly.  Any
 * non-MSI-X outcome falls through to a single-vector configuration.
 */
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	/* Default single-vector configuration for MSI and INTx. */
	if (!tg3_flag(tp, USING_MSIX)) {
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
9904
9905 static void tg3_ints_fini(struct tg3 *tp)
9906 {
9907         if (tg3_flag(tp, USING_MSIX))
9908                 pci_disable_msix(tp->pdev);
9909         else if (tg3_flag(tp, USING_MSI))
9910                 pci_disable_msi(tp->pdev);
9911         tg3_flag_clear(tp, USING_MSI);
9912         tg3_flag_clear(tp, USING_MSIX);
9913         tg3_flag_clear(tp, ENABLE_RSS);
9914         tg3_flag_clear(tp, ENABLE_TSS);
9915 }
9916
/* net_device open handler: powers up the chip, sets up interrupts,
 * DMA memory, and NAPI, initializes the hardware, verifies MSI
 * delivery, and starts the service timer and tx queues.  Errors unwind
 * in reverse order through the err_out labels.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, err;

	/* Load TSO firmware if needed.  Only 5701 A0 treats a load
	 * failure as fatal; other chips just drop TSO capability.
	 */
	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
			if (err)
				return err;
		} else if (err) {
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	netif_carrier_off(tp->dev);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto err_out1;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	/* Request one irq per vector, unwinding already-requested
	 * vectors on failure.
	 */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			for (i--; i >= 0; i--) {
				tnapi = &tp->napi[i];
				free_irq(tnapi->irq_vec, tnapi);
			}
			goto err_out2;
		}
	}

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		/* Tagged-status chips (except 5717 and 57765-class)
		 * only need a 1 Hz service timer; others run at 10 Hz.
		 */
		if (tg3_flag(tp, TAGGED_STATUS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
		    !tg3_flag(tp, 57765_CLASS))
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		/* ASF heartbeat fires at half the service-timer rate
		 * (once every 2 seconds).
		 */
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 2);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err)
		goto err_out3;

	if (tg3_flag(tp, USING_MSI)) {
		/* Verify MSI delivery actually works on this platform;
		 * tg3_test_msi() falls back to INTx internally.
		 */
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto err_out2;
		}

		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_full_lock(tp, 0);

	add_timer(&tp->timer);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

err_out3:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

err_out2:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

err_out1:
	tg3_ints_fini(tp);
	tg3_frob_aux_power(tp, false);
	pci_set_power_state(tp->pdev, PCI_D3hot);
	return err;
}
10069
/* net_device stop handler: tears down what tg3_open() set up, in
 * reverse order, then powers the chip down.
 */
static int tg3_close(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	/* Quiesce NAPI and make sure no reset task races with teardown. */
	tg3_napi_disable(tp);
	tg3_reset_task_cancel(tp);

	netif_tx_stop_all_queues(dev);

	del_timer_sync(&tp->timer);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	/* Release irqs in reverse order of acquisition. */
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	/* Clear stats across close / open calls */
	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);

	tg3_power_down(tp);

	netif_carrier_off(tp->dev);

	return 0;
}
10115
10116 static inline u64 get_stat64(tg3_stat64_t *val)
10117 {
10118        return ((u64)val->high << 32) | ((u64)val->low);
10119 }
10120
/* Return the cumulative CRC/FCS error count.  5700/5701 with a copper
 * PHY read it from the PHY's RX error counter; everything else uses
 * the MAC's rx_fcs_errors hardware statistic.
 */
static u64 calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			/* Enable CRC counting before reading the counter. */
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		/* The PHY counter is accumulated into a running software
		 * total — presumably it is clear-on-read; confirm against
		 * the PHY datasheet.
		 */
		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
10146
/* Fold a hardware counter into the ethtool stats snapshot, adding the
 * value saved across the last close/open in old_estats.  Relies on
 * estats, old_estats, and hw_stats being in scope at the use site.
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)
10150
/* tg3_get_estats - fill @estats with the cumulative ethtool statistics.
 * @tp:     driver private state
 * @estats: caller-provided buffer to fill
 *
 * Each counter is the value accumulated before the last chip reset
 * (tp->estats_prev) plus the current hardware counter, so the totals
 * survive resets.  Returns @estats, or the saved snapshot alone when
 * the hardware statistics block is not mapped.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp,
                                               struct tg3_ethtool_stats *estats)
{
        struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        if (!hw_stats)
                return old_estats;

        /* Receive-side MAC counters */
        ESTAT_ADD(rx_octets);
        ESTAT_ADD(rx_fragments);
        ESTAT_ADD(rx_ucast_packets);
        ESTAT_ADD(rx_mcast_packets);
        ESTAT_ADD(rx_bcast_packets);
        ESTAT_ADD(rx_fcs_errors);
        ESTAT_ADD(rx_align_errors);
        ESTAT_ADD(rx_xon_pause_rcvd);
        ESTAT_ADD(rx_xoff_pause_rcvd);
        ESTAT_ADD(rx_mac_ctrl_rcvd);
        ESTAT_ADD(rx_xoff_entered);
        ESTAT_ADD(rx_frame_too_long_errors);
        ESTAT_ADD(rx_jabbers);
        ESTAT_ADD(rx_undersize_packets);
        ESTAT_ADD(rx_in_length_errors);
        ESTAT_ADD(rx_out_length_errors);
        ESTAT_ADD(rx_64_or_less_octet_packets);
        ESTAT_ADD(rx_65_to_127_octet_packets);
        ESTAT_ADD(rx_128_to_255_octet_packets);
        ESTAT_ADD(rx_256_to_511_octet_packets);
        ESTAT_ADD(rx_512_to_1023_octet_packets);
        ESTAT_ADD(rx_1024_to_1522_octet_packets);
        ESTAT_ADD(rx_1523_to_2047_octet_packets);
        ESTAT_ADD(rx_2048_to_4095_octet_packets);
        ESTAT_ADD(rx_4096_to_8191_octet_packets);
        ESTAT_ADD(rx_8192_to_9022_octet_packets);

        /* Transmit-side MAC counters */
        ESTAT_ADD(tx_octets);
        ESTAT_ADD(tx_collisions);
        ESTAT_ADD(tx_xon_sent);
        ESTAT_ADD(tx_xoff_sent);
        ESTAT_ADD(tx_flow_control);
        ESTAT_ADD(tx_mac_errors);
        ESTAT_ADD(tx_single_collisions);
        ESTAT_ADD(tx_mult_collisions);
        ESTAT_ADD(tx_deferred);
        ESTAT_ADD(tx_excessive_collisions);
        ESTAT_ADD(tx_late_collisions);
        ESTAT_ADD(tx_collide_2times);
        ESTAT_ADD(tx_collide_3times);
        ESTAT_ADD(tx_collide_4times);
        ESTAT_ADD(tx_collide_5times);
        ESTAT_ADD(tx_collide_6times);
        ESTAT_ADD(tx_collide_7times);
        ESTAT_ADD(tx_collide_8times);
        ESTAT_ADD(tx_collide_9times);
        ESTAT_ADD(tx_collide_10times);
        ESTAT_ADD(tx_collide_11times);
        ESTAT_ADD(tx_collide_12times);
        ESTAT_ADD(tx_collide_13times);
        ESTAT_ADD(tx_collide_14times);
        ESTAT_ADD(tx_collide_15times);
        ESTAT_ADD(tx_ucast_packets);
        ESTAT_ADD(tx_mcast_packets);
        ESTAT_ADD(tx_bcast_packets);
        ESTAT_ADD(tx_carrier_sense_errors);
        ESTAT_ADD(tx_discards);
        ESTAT_ADD(tx_errors);

        /* Write DMA engine counters */
        ESTAT_ADD(dma_writeq_full);
        ESTAT_ADD(dma_write_prioq_full);
        ESTAT_ADD(rxbds_empty);
        ESTAT_ADD(rx_discards);
        ESTAT_ADD(rx_errors);
        ESTAT_ADD(rx_threshold_hit);

        /* Read DMA engine counters */
        ESTAT_ADD(dma_readq_full);
        ESTAT_ADD(dma_read_prioq_full);
        ESTAT_ADD(tx_comp_queue_full);

        /* Host coalescing / interrupt counters */
        ESTAT_ADD(ring_set_send_prod_index);
        ESTAT_ADD(ring_status_update);
        ESTAT_ADD(nic_irqs);
        ESTAT_ADD(nic_avoided_irqs);
        ESTAT_ADD(nic_tx_threshold_hit);

        ESTAT_ADD(mbuf_lwm_thresh_hit);

        return estats;
}
10240
/* tg3_get_stats64 - ndo_get_stats64 hook.
 *
 * Builds the standard netdev counters by adding the live hardware
 * statistics block to the totals saved before the last chip reset
 * (tp->net_stats_prev).  When the hw stats block is not mapped, the
 * saved snapshot is returned unmodified.
 */
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
                                                 struct rtnl_link_stats64 *stats)
{
        struct tg3 *tp = netdev_priv(dev);
        struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        if (!hw_stats)
                return old_stats;

        stats->rx_packets = old_stats->rx_packets +
                get_stat64(&hw_stats->rx_ucast_packets) +
                get_stat64(&hw_stats->rx_mcast_packets) +
                get_stat64(&hw_stats->rx_bcast_packets);

        stats->tx_packets = old_stats->tx_packets +
                get_stat64(&hw_stats->tx_ucast_packets) +
                get_stat64(&hw_stats->tx_mcast_packets) +
                get_stat64(&hw_stats->tx_bcast_packets);

        stats->rx_bytes = old_stats->rx_bytes +
                get_stat64(&hw_stats->rx_octets);
        stats->tx_bytes = old_stats->tx_bytes +
                get_stat64(&hw_stats->tx_octets);

        stats->rx_errors = old_stats->rx_errors +
                get_stat64(&hw_stats->rx_errors);
        stats->tx_errors = old_stats->tx_errors +
                get_stat64(&hw_stats->tx_errors) +
                get_stat64(&hw_stats->tx_mac_errors) +
                get_stat64(&hw_stats->tx_carrier_sense_errors) +
                get_stat64(&hw_stats->tx_discards);

        stats->multicast = old_stats->multicast +
                get_stat64(&hw_stats->rx_mcast_packets);
        stats->collisions = old_stats->collisions +
                get_stat64(&hw_stats->tx_collisions);

        stats->rx_length_errors = old_stats->rx_length_errors +
                get_stat64(&hw_stats->rx_frame_too_long_errors) +
                get_stat64(&hw_stats->rx_undersize_packets);

        stats->rx_over_errors = old_stats->rx_over_errors +
                get_stat64(&hw_stats->rxbds_empty);
        stats->rx_frame_errors = old_stats->rx_frame_errors +
                get_stat64(&hw_stats->rx_align_errors);
        stats->tx_aborted_errors = old_stats->tx_aborted_errors +
                get_stat64(&hw_stats->tx_discards);
        stats->tx_carrier_errors = old_stats->tx_carrier_errors +
                get_stat64(&hw_stats->tx_carrier_sense_errors);

        /* calc_crc_errors() may fold in PHY-maintained CRC counters on
         * some chips - see its definition earlier in this file.
         */
        stats->rx_crc_errors = old_stats->rx_crc_errors +
                calc_crc_errors(tp);

        stats->rx_missed_errors = old_stats->rx_missed_errors +
                get_stat64(&hw_stats->rx_discards);

        /* Software drop counters maintained by the driver itself. */
        stats->rx_dropped = tp->rx_dropped;
        stats->tx_dropped = tp->tx_dropped;

        return stats;
}
10303
/* ethtool get_regs_len hook: size of the buffer tg3_get_regs() fills. */
static int tg3_get_regs_len(struct net_device *dev)
{
        return TG3_REG_BLK_SIZE;
}
10308
/* ethtool get_regs hook: dump the legacy register block into @_p.
 * The buffer is zeroed first; nothing is read while the PHY is in a
 * low-power state, so the caller then sees an all-zero dump.
 */
static void tg3_get_regs(struct net_device *dev,
                struct ethtool_regs *regs, void *_p)
{
        struct tg3 *tp = netdev_priv(dev);

        regs->version = 0;

        memset(_p, 0, TG3_REG_BLK_SIZE);

        if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
                return;

        /* Hold the full lock so the dump is a consistent snapshot. */
        tg3_full_lock(tp, 0);

        tg3_dump_legacy_regs(tp, (u32 *)_p);

        tg3_full_unlock(tp);
}
10327
/* ethtool get_eeprom_len hook: NVRAM size as probed at init time. */
static int tg3_get_eeprom_len(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        return tp->nvram_size;
}
10334
/* ethtool get_eeprom hook: read an arbitrary byte range from NVRAM.
 *
 * NVRAM is only accessible in aligned 32-bit words, so the transfer is
 * split into an unaligned head, a run of whole words, and an unaligned
 * tail.  eeprom->len tracks the number of bytes successfully copied so
 * far, even when a read fails mid-transfer.  Returns 0 or a negative
 * errno; -EAGAIN while the PHY is in low-power state.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
        struct tg3 *tp = netdev_priv(dev);
        int ret;
        u8  *pd;
        u32 i, offset, len, b_offset, b_count;
        __be32 val;

        if (tg3_flag(tp, NO_NVRAM))
                return -EINVAL;

        if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
                return -EAGAIN;

        offset = eeprom->offset;
        len = eeprom->len;
        eeprom->len = 0;

        eeprom->magic = TG3_EEPROM_MAGIC;

        if (offset & 3) {
                /* adjustments to start on required 4 byte boundary */
                b_offset = offset & 3;
                b_count = 4 - b_offset;
                if (b_count > len) {
                        /* i.e. offset=1 len=2 */
                        b_count = len;
                }
                /* Read the whole word and copy out only the wanted bytes */
                ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
                if (ret)
                        return ret;
                memcpy(data, ((char *)&val) + b_offset, b_count);
                len -= b_count;
                offset += b_count;
                eeprom->len += b_count;
        }

        /* read bytes up to the last 4 byte boundary */
        pd = &data[eeprom->len];
        for (i = 0; i < (len - (len & 3)); i += 4) {
                ret = tg3_nvram_read_be32(tp, offset + i, &val);
                if (ret) {
                        /* Report the bytes copied before the failure */
                        eeprom->len += i;
                        return ret;
                }
                memcpy(pd + i, &val, 4);
        }
        eeprom->len += i;

        if (len & 3) {
                /* read last bytes not ending on 4 byte boundary */
                pd = &data[eeprom->len];
                b_count = len & 3;
                b_offset = offset + len - b_count;
                ret = tg3_nvram_read_be32(tp, b_offset, &val);
                if (ret)
                        return ret;
                memcpy(pd, &val, b_count);
                eeprom->len += b_count;
        }
        return 0;
}
10397
/* ethtool set_eeprom hook: write an arbitrary byte range to NVRAM.
 *
 * NVRAM writes must be whole, aligned 32-bit words, so unaligned head
 * and tail bytes are handled read-modify-write style: the bordering
 * words are read first and the user data merged into a temporary
 * bounce buffer before the single block write.  Returns 0 or a
 * negative errno; -EAGAIN while the PHY is in low-power state.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
        struct tg3 *tp = netdev_priv(dev);
        int ret;
        u32 offset, len, b_offset, odd_len;
        u8 *buf;
        __be32 start, end;

        if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
                return -EAGAIN;

        if (tg3_flag(tp, NO_NVRAM) ||
            eeprom->magic != TG3_EEPROM_MAGIC)
                return -EINVAL;

        offset = eeprom->offset;
        len = eeprom->len;

        if ((b_offset = (offset & 3))) {
                /* adjustments to start on required 4 byte boundary */
                ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
                if (ret)
                        return ret;
                len += b_offset;
                offset &= ~3;
                if (len < 4)
                        len = 4;
        }

        odd_len = 0;
        if (len & 3) {
                /* adjustments to end on required 4 byte boundary */
                odd_len = 1;
                len = (len + 3) & ~3;
                ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
                if (ret)
                        return ret;
        }

        /* A bounce buffer is only needed when padding was required. */
        buf = data;
        if (b_offset || odd_len) {
                buf = kmalloc(len, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;
                if (b_offset)
                        memcpy(buf, &start, 4);
                if (odd_len)
                        memcpy(buf+len-4, &end, 4);
                memcpy(buf + b_offset, data, eeprom->len);
        }

        ret = tg3_nvram_write_block(tp, offset, len, buf);

        if (buf != data)
                kfree(buf);

        return ret;
}
10456
/* ethtool get_settings hook: report link capabilities, advertisement
 * and current state.  When phylib manages the PHY, defer entirely to
 * phy_ethtool_gset(); -EAGAIN if the PHY is not connected yet.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct tg3 *tp = netdev_priv(dev);

        if (tg3_flag(tp, USE_PHYLIB)) {
                struct phy_device *phydev;
                if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                        return -EAGAIN;
                phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
                return phy_ethtool_gset(phydev, cmd);
        }

        cmd->supported = (SUPPORTED_Autoneg);

        if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
                cmd->supported |= (SUPPORTED_1000baseT_Half |
                                   SUPPORTED_1000baseT_Full);

        /* Copper PHYs add 10/100 modes; SerDes links report fibre. */
        if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
                cmd->supported |= (SUPPORTED_100baseT_Half |
                                  SUPPORTED_100baseT_Full |
                                  SUPPORTED_10baseT_Half |
                                  SUPPORTED_10baseT_Full |
                                  SUPPORTED_TP);
                cmd->port = PORT_TP;
        } else {
                cmd->supported |= SUPPORTED_FIBRE;
                cmd->port = PORT_FIBRE;
        }

        /* Translate the configured flow control into pause adverts. */
        cmd->advertising = tp->link_config.advertising;
        if (tg3_flag(tp, PAUSE_AUTONEG)) {
                if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
                        if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
                                cmd->advertising |= ADVERTISED_Pause;
                        } else {
                                cmd->advertising |= ADVERTISED_Pause |
                                                    ADVERTISED_Asym_Pause;
                        }
                } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
                        cmd->advertising |= ADVERTISED_Asym_Pause;
                }
        }
        /* Speed/duplex/MDI-X are only meaningful with link up. */
        if (netif_running(dev) && netif_carrier_ok(dev)) {
                ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
                cmd->duplex = tp->link_config.active_duplex;
                cmd->lp_advertising = tp->link_config.rmt_adv;
                if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
                        if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
                                cmd->eth_tp_mdix = ETH_TP_MDI_X;
                        else
                                cmd->eth_tp_mdix = ETH_TP_MDI;
                }
        } else {
                ethtool_cmd_speed_set(cmd, SPEED_INVALID);
                cmd->duplex = DUPLEX_INVALID;
                cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
        }
        cmd->phy_address = tp->phy_addr;
        cmd->transceiver = XCVR_INTERNAL;
        cmd->autoneg = tp->link_config.autoneg;
        cmd->maxtxpkt = 0;
        cmd->maxrxpkt = 0;
        return 0;
}
10522
/* ethtool set_settings hook: validate and apply link parameters.
 *
 * When phylib manages the PHY, defer entirely to phy_ethtool_sset().
 * Otherwise: with autoneg enabled, the advertisement mask is checked
 * against what this PHY supports; with autoneg disabled, only
 * 1000/full is accepted on SerDes and only 10 or 100 Mbps on copper.
 * The new configuration is applied under the full lock and the PHY is
 * reprogrammed if the interface is running.
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct tg3 *tp = netdev_priv(dev);
        u32 speed = ethtool_cmd_speed(cmd);

        if (tg3_flag(tp, USE_PHYLIB)) {
                struct phy_device *phydev;
                if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                        return -EAGAIN;
                phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
                return phy_ethtool_sset(phydev, cmd);
        }

        if (cmd->autoneg != AUTONEG_ENABLE &&
            cmd->autoneg != AUTONEG_DISABLE)
                return -EINVAL;

        if (cmd->autoneg == AUTONEG_DISABLE &&
            cmd->duplex != DUPLEX_FULL &&
            cmd->duplex != DUPLEX_HALF)
                return -EINVAL;

        if (cmd->autoneg == AUTONEG_ENABLE) {
                /* Build the set of advert bits this device can accept */
                u32 mask = ADVERTISED_Autoneg |
                           ADVERTISED_Pause |
                           ADVERTISED_Asym_Pause;

                if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
                        mask |= ADVERTISED_1000baseT_Half |
                                ADVERTISED_1000baseT_Full;

                if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
                        mask |= ADVERTISED_100baseT_Half |
                                ADVERTISED_100baseT_Full |
                                ADVERTISED_10baseT_Half |
                                ADVERTISED_10baseT_Full |
                                ADVERTISED_TP;
                else
                        mask |= ADVERTISED_FIBRE;

                if (cmd->advertising & ~mask)
                        return -EINVAL;

                /* Keep only the speed/duplex bits in the stored adverts */
                mask &= (ADVERTISED_1000baseT_Half |
                         ADVERTISED_1000baseT_Full |
                         ADVERTISED_100baseT_Half |
                         ADVERTISED_100baseT_Full |
                         ADVERTISED_10baseT_Half |
                         ADVERTISED_10baseT_Full);

                cmd->advertising &= mask;
        } else {
                if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
                        /* SerDes links can only be forced to 1000/full */
                        if (speed != SPEED_1000)
                                return -EINVAL;

                        if (cmd->duplex != DUPLEX_FULL)
                                return -EINVAL;
                } else {
                        /* Forcing 1000 Mbps on copper is not supported */
                        if (speed != SPEED_100 &&
                            speed != SPEED_10)
                                return -EINVAL;
                }
        }

        tg3_full_lock(tp, 0);

        tp->link_config.autoneg = cmd->autoneg;
        if (cmd->autoneg == AUTONEG_ENABLE) {
                tp->link_config.advertising = (cmd->advertising |
                                              ADVERTISED_Autoneg);
                tp->link_config.speed = SPEED_INVALID;
                tp->link_config.duplex = DUPLEX_INVALID;
        } else {
                tp->link_config.advertising = 0;
                tp->link_config.speed = speed;
                tp->link_config.duplex = cmd->duplex;
        }

        /* Remember the requested settings for restore after reset */
        tp->link_config.orig_speed = tp->link_config.speed;
        tp->link_config.orig_duplex = tp->link_config.duplex;
        tp->link_config.orig_autoneg = tp->link_config.autoneg;

        if (netif_running(dev))
                tg3_setup_phy(tp, 1);

        tg3_full_unlock(tp);

        return 0;
}
10613
/* ethtool get_drvinfo hook: report driver name/version, the firmware
 * version string read at probe time, and the PCI bus address.
 */
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct tg3 *tp = netdev_priv(dev);

        strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
        strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
        strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
        strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
}
10623
10624 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10625 {
10626         struct tg3 *tp = netdev_priv(dev);
10627
10628         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10629                 wol->supported = WAKE_MAGIC;
10630         else
10631                 wol->supported = 0;
10632         wol->wolopts = 0;
10633         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10634                 wol->wolopts = WAKE_MAGIC;
10635         memset(&wol->sopass, 0, sizeof(wol->sopass));
10636 }
10637
/* ethtool set_wol hook: enable or disable magic-packet wake-up.
 * Only WAKE_MAGIC is accepted, and only when the device advertises
 * WoL capability.  The request is pushed to the PM core via
 * device_set_wakeup_enable(), then mirrored into the driver's
 * WOL_ENABLE flag under tp->lock.
 */
static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
        struct tg3 *tp = netdev_priv(dev);
        struct device *dp = &tp->pdev->dev;

        if (wol->wolopts & ~WAKE_MAGIC)
                return -EINVAL;
        if ((wol->wolopts & WAKE_MAGIC) &&
            !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
                return -EINVAL;

        device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);

        spin_lock_bh(&tp->lock);
        if (device_may_wakeup(dp))
                tg3_flag_set(tp, WOL_ENABLE);
        else
                tg3_flag_clear(tp, WOL_ENABLE);
        spin_unlock_bh(&tp->lock);

        return 0;
}
10660
/* ethtool get_msglevel hook: report the netif message-level bitmask. */
static u32 tg3_get_msglevel(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        return tp->msg_enable;
}
10666
/* ethtool set_msglevel hook: update the netif message-level bitmask. */
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
        struct tg3 *tp = netdev_priv(dev);
        tp->msg_enable = value;
}
10672
/* ethtool nway_reset hook: restart link autonegotiation.
 * Not valid for SerDes PHYs.  With phylib, defer to phy_start_aneg();
 * otherwise restart aneg via MII_BMCR, but only when autoneg is
 * already enabled (or parallel-detect is active).
 */
static int tg3_nway_reset(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        int r;

        if (!netif_running(dev))
                return -EAGAIN;

        if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
                return -EINVAL;

        if (tg3_flag(tp, USE_PHYLIB)) {
                if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                        return -EAGAIN;
                r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
        } else {
                u32 bmcr;

                spin_lock_bh(&tp->lock);
                r = -EINVAL;
                /* BMCR is read twice; the first read's result is
                 * discarded.  NOTE(review): looks like a deliberate
                 * dummy read to flush a stale value - confirm against
                 * PHY errata before removing.
                 */
                tg3_readphy(tp, MII_BMCR, &bmcr);
                if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
                    ((bmcr & BMCR_ANENABLE) ||
                     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
                        tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
                                                   BMCR_ANENABLE);
                        r = 0;
                }
                spin_unlock_bh(&tp->lock);
        }

        return r;
}
10706
10707 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10708 {
10709         struct tg3 *tp = netdev_priv(dev);
10710
10711         ering->rx_max_pending = tp->rx_std_ring_mask;
10712         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10713                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10714         else
10715                 ering->rx_jumbo_max_pending = 0;
10716
10717         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10718
10719         ering->rx_pending = tp->rx_pending;
10720         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10721                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10722         else
10723                 ering->rx_jumbo_pending = 0;
10724
10725         ering->tx_pending = tp->napi[0].tx_pending;
10726 }
10727
/* ethtool set_ringparam hook: resize the RX/TX rings.
 *
 * Sizes are validated against hardware limits; TX must also exceed
 * MAX_SKB_FRAGS so a maximally fragmented skb always fits (three
 * times that on chips with the TSO_BUG workaround).  If the interface
 * is running, the device is stopped, the new sizes applied under the
 * full lock, and the hardware restarted.  Returns 0 or -EINVAL, or a
 * restart error.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
        struct tg3 *tp = netdev_priv(dev);
        int i, irq_sync = 0, err = 0;

        if ((ering->rx_pending > tp->rx_std_ring_mask) ||
            (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
            (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
            (ering->tx_pending <= MAX_SKB_FRAGS) ||
            (tg3_flag(tp, TSO_BUG) &&
             (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
                return -EINVAL;

        if (netif_running(dev)) {
                tg3_phy_stop(tp);
                tg3_netif_stop(tp);
                irq_sync = 1;
        }

        tg3_full_lock(tp, irq_sync);

        tp->rx_pending = ering->rx_pending;

        /* Some chips cap the standard RX ring at 64 entries */
        if (tg3_flag(tp, MAX_RXPEND_64) &&
            tp->rx_pending > 63)
                tp->rx_pending = 63;
        tp->rx_jumbo_pending = ering->rx_jumbo_pending;

        /* All TX queues share the same ring size */
        for (i = 0; i < tp->irq_max; i++)
                tp->napi[i].tx_pending = ering->tx_pending;

        if (netif_running(dev)) {
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                err = tg3_restart_hw(tp, 1);
                if (!err)
                        tg3_netif_start(tp);
        }

        tg3_full_unlock(tp);

        if (irq_sync && !err)
                tg3_phy_start(tp);

        return err;
}
10773
10774 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10775 {
10776         struct tg3 *tp = netdev_priv(dev);
10777
10778         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10779
10780         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
10781                 epause->rx_pause = 1;
10782         else
10783                 epause->rx_pause = 0;
10784
10785         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
10786                 epause->tx_pause = 1;
10787         else
10788                 epause->tx_pause = 0;
10789 }
10790
/* ethtool set_pauseparam hook: configure RX/TX flow control.
 *
 * With phylib, the requested pause mode is translated into pause
 * advertisement bits and pushed into the PHY; autonegotiation is
 * restarted when the advertisement changed so the link partner learns
 * the new settings.  Without phylib, the MAC flow-control flags are
 * updated directly under the full lock and the hardware restarted if
 * the interface is running.  Returns 0 or a negative errno.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
        struct tg3 *tp = netdev_priv(dev);
        int err = 0;

        if (tg3_flag(tp, USE_PHYLIB)) {
                u32 newadv;
                struct phy_device *phydev;

                phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

                /* Asymmetric pause requires the PHY to support it */
                if (!(phydev->supported & SUPPORTED_Pause) ||
                    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
                     (epause->rx_pause != epause->tx_pause)))
                        return -EINVAL;

                /* Map rx/tx pause requests to 802.3 pause adverts */
                tp->link_config.flowctrl = 0;
                if (epause->rx_pause) {
                        tp->link_config.flowctrl |= FLOW_CTRL_RX;

                        if (epause->tx_pause) {
                                tp->link_config.flowctrl |= FLOW_CTRL_TX;
                                newadv = ADVERTISED_Pause;
                        } else
                                newadv = ADVERTISED_Pause |
                                         ADVERTISED_Asym_Pause;
                } else if (epause->tx_pause) {
                        tp->link_config.flowctrl |= FLOW_CTRL_TX;
                        newadv = ADVERTISED_Asym_Pause;
                } else
                        newadv = 0;

                if (epause->autoneg)
                        tg3_flag_set(tp, PAUSE_AUTONEG);
                else
                        tg3_flag_clear(tp, PAUSE_AUTONEG);

                if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
                        u32 oldadv = phydev->advertising &
                                     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
                        if (oldadv != newadv) {
                                phydev->advertising &=
                                        ~(ADVERTISED_Pause |
                                          ADVERTISED_Asym_Pause);
                                phydev->advertising |= newadv;
                                if (phydev->autoneg) {
                                        /*
                                         * Always renegotiate the link to
                                         * inform our link partner of our
                                         * flow control settings, even if the
                                         * flow control is forced.  Let
                                         * tg3_adjust_link() do the final
                                         * flow control setup.
                                         */
                                        return phy_start_aneg(phydev);
                                }
                        }

                        if (!epause->autoneg)
                                tg3_setup_flow_control(tp, 0, 0);
                } else {
                        /* PHY not connected yet: stash the adverts for
                         * later application.
                         */
                        tp->link_config.orig_advertising &=
                                        ~(ADVERTISED_Pause |
                                          ADVERTISED_Asym_Pause);
                        tp->link_config.orig_advertising |= newadv;
                }
        } else {
                int irq_sync = 0;

                if (netif_running(dev)) {
                        tg3_netif_stop(tp);
                        irq_sync = 1;
                }

                tg3_full_lock(tp, irq_sync);

                if (epause->autoneg)
                        tg3_flag_set(tp, PAUSE_AUTONEG);
                else
                        tg3_flag_clear(tp, PAUSE_AUTONEG);
                if (epause->rx_pause)
                        tp->link_config.flowctrl |= FLOW_CTRL_RX;
                else
                        tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
                if (epause->tx_pause)
                        tp->link_config.flowctrl |= FLOW_CTRL_TX;
                else
                        tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

                if (netif_running(dev)) {
                        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                        err = tg3_restart_hw(tp, 1);
                        if (!err)
                                tg3_netif_start(tp);
                }

                tg3_full_unlock(tp);
        }

        return err;
}
10892
10893 static int tg3_get_sset_count(struct net_device *dev, int sset)
10894 {
10895         switch (sset) {
10896         case ETH_SS_TEST:
10897                 return TG3_NUM_TEST;
10898         case ETH_SS_STATS:
10899                 return TG3_NUM_STATS;
10900         default:
10901                 return -EOPNOTSUPP;
10902         }
10903 }
10904
/* ethtool get_rxnfc hook: only ETHTOOL_GRXRINGS is implemented -
 * report the number of RX rings usable for RSS.  Requires MSI-X.
 */
static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
                         u32 *rules __always_unused)
{
        struct tg3 *tp = netdev_priv(dev);

        if (!tg3_flag(tp, SUPPORT_MSIX))
                return -EOPNOTSUPP;

        switch (info->cmd) {
        case ETHTOOL_GRXRINGS:
                if (netif_running(tp->dev))
                        info->data = tp->irq_cnt;
                else {
                        /* Not running: report what would be allocated */
                        info->data = num_online_cpus();
                        if (info->data > TG3_IRQ_MAX_VECS_RSS)
                                info->data = TG3_IRQ_MAX_VECS_RSS;
                }

                /* The first interrupt vector only
                 * handles link interrupts.
                 */
                info->data -= 1;
                return 0;

        default:
                return -EOPNOTSUPP;
        }
}
10933
10934 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
10935 {
10936         u32 size = 0;
10937         struct tg3 *tp = netdev_priv(dev);
10938
10939         if (tg3_flag(tp, SUPPORT_MSIX))
10940                 size = TG3_RSS_INDIR_TBL_SIZE;
10941
10942         return size;
10943 }
10944
10945 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
10946 {
10947         struct tg3 *tp = netdev_priv(dev);
10948         int i;
10949
10950         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10951                 indir[i] = tp->rss_ind_tbl[i];
10952
10953         return 0;
10954 }
10955
/* ethtool set_rxfh_indir hook: replace the RSS indirection table.
 * The cached copy is always updated; it is flushed to hardware only
 * when the interface is running with RSS enabled.
 */
static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
{
        struct tg3 *tp = netdev_priv(dev);
        size_t i;

        for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
                tp->rss_ind_tbl[i] = indir[i];

        if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
                return 0;

        /* It is legal to write the indirection
         * table while the device is running.
         */
        tg3_full_lock(tp, 0);
        tg3_rss_write_indir_tbl(tp);
        tg3_full_unlock(tp);

        return 0;
}
10976
10977 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10978 {
10979         switch (stringset) {
10980         case ETH_SS_STATS:
10981                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10982                 break;
10983         case ETH_SS_TEST:
10984                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10985                 break;
10986         default:
10987                 WARN_ON(1);     /* we need a WARN() */
10988                 break;
10989         }
10990 }
10991
/* ethtool set_phys_id hook: blink the port LEDs for identification.
 * Returns 1 from ETHTOOL_ID_ACTIVE so the ethtool core drives one
 * ON/OFF cycle per second, and restores the configured LED mode on
 * ETHTOOL_ID_INACTIVE.
 */
static int tg3_set_phys_id(struct net_device *dev,
                            enum ethtool_phys_id_state state)
{
        struct tg3 *tp = netdev_priv(dev);

        if (!netif_running(tp->dev))
                return -EAGAIN;

        switch (state) {
        case ETHTOOL_ID_ACTIVE:
                return 1;       /* cycle on/off once per second */

        case ETHTOOL_ID_ON:
                /* Override link/traffic LEDs: force all speed LEDs on */
                tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
                     LED_CTRL_1000MBPS_ON |
                     LED_CTRL_100MBPS_ON |
                     LED_CTRL_10MBPS_ON |
                     LED_CTRL_TRAFFIC_OVERRIDE |
                     LED_CTRL_TRAFFIC_BLINK |
                     LED_CTRL_TRAFFIC_LED);
                break;

        case ETHTOOL_ID_OFF:
                tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
                     LED_CTRL_TRAFFIC_OVERRIDE);
                break;

        case ETHTOOL_ID_INACTIVE:
                /* Restore normal LED operation */
                tw32(MAC_LED_CTRL, tp->led_ctrl);
                break;
        }

        return 0;
}
11026
/* ethtool get_ethtool_stats hook: fill tmp_stats with the driver's
 * accumulated statistics.  The cast assumes tmp_stats is laid out as
 * struct tg3_ethtool_stats - presumably sized by the ethtool core
 * from the driver's stats string count; confirm against
 * get_sset_count.
 */
static void tg3_get_ethtool_stats(struct net_device *dev,
                                   struct ethtool_stats *estats, u64 *tmp_stats)
{
        struct tg3 *tp = netdev_priv(dev);

        tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
}
11034
/* Locate and read the chip's VPD (Vital Product Data) block.
 *
 * For TG3_EEPROM_MAGIC devices the NVRAM directory is scanned for an
 * extended-VPD entry; when one is found its length and logical offset
 * are used.  Otherwise (and for all other image types) the fixed
 * TG3_NVM_VPD_OFF/TG3_NVM_VPD_LEN window is used.  Non-EEPROM images
 * are read through the PCI VPD capability instead of NVRAM.
 *
 * Returns a kmalloc'd buffer (caller must kfree()) and stores the
 * byte length in *vpdlen, or NULL on any failure.
 */
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
        int i;
        __be32 *buf;
        u32 offset = 0, len = 0;
        u32 magic, val;

        if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
                return NULL;

        if (magic == TG3_EEPROM_MAGIC) {
                /* Walk the NVRAM directory looking for an extended
                 * VPD entry.
                 */
                for (offset = TG3_NVM_DIR_START;
                     offset < TG3_NVM_DIR_END;
                     offset += TG3_NVM_DIRENT_SIZE) {
                        if (tg3_nvram_read(tp, offset, &val))
                                return NULL;

                        if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
                            TG3_NVM_DIRTYPE_EXTVPD)
                                break;
                }

                if (offset != TG3_NVM_DIR_END) {
                        /* Entry found: length field is scaled by 4
                         * (presumably stored in 4-byte words), and the
                         * data offset lives in the next dirent cell.
                         */
                        len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
                        if (tg3_nvram_read(tp, offset + 4, &offset))
                                return NULL;

                        offset = tg3_nvram_logical_addr(tp, offset);
                }
        }

        if (!offset || !len) {
                /* No extended VPD entry: fall back to the fixed window. */
                offset = TG3_NVM_VPD_OFF;
                len = TG3_NVM_VPD_LEN;
        }

        buf = kmalloc(len, GFP_KERNEL);
        if (buf == NULL)
                return NULL;

        if (magic == TG3_EEPROM_MAGIC) {
                for (i = 0; i < len; i += 4) {
                        /* The data is in little-endian format in NVRAM.
                         * Use the big-endian read routines to preserve
                         * the byte order as it exists in NVRAM.
                         */
                        if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
                                goto error;
                }
        } else {
                u8 *ptr;
                ssize_t cnt;
                unsigned int pos = 0;

                /* Read through the PCI VPD capability.  Up to three
                 * attempts: timeouts/interrupts count as zero-byte
                 * reads and are retried; any other error aborts.
                 */
                ptr = (u8 *)&buf[0];
                for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
                        cnt = pci_read_vpd(tp->pdev, pos,
                                           len - pos, ptr);
                        if (cnt == -ETIMEDOUT || cnt == -EINTR)
                                cnt = 0;
                        else if (cnt < 0)
                                goto error;
                }
                if (pos != len)
                        goto error;
        }

        *vpdlen = len;

        return buf;

error:
        kfree(buf);
        return NULL;
}
11110
/* Byte counts of the NVRAM images validated by tg3_test_nvram():
 * one size per selfboot format-1 revision, plus the legacy EEPROM
 * test window and the selfboot HW image layout.
 */
#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
#define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
#define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
#define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11120
/* NVRAM self-test: read the firmware image and verify its integrity.
 * Three image families are handled:
 *   - legacy EEPROM images (TG3_EEPROM_MAGIC): CRC-checked bootstrap
 *     and manufacturing blocks, plus the VPD block's own checksum;
 *   - selfboot format-1 images: 8-bit byte sum must be zero;
 *   - selfboot HW images: per-byte parity bits.
 * Returns 0 when the image checks out, -EIO on corruption or read
 * error, -ENOMEM on allocation failure.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
        u32 csum, magic, len;
        __be32 *buf;
        int i, j, k, err = 0, size;

        if (tg3_flag(tp, NO_NVRAM))
                return 0;

        if (tg3_nvram_read(tp, 0, &magic) != 0)
                return -EIO;

        /* Determine how many bytes to read from the magic word. */
        if (magic == TG3_EEPROM_MAGIC)
                size = NVRAM_TEST_SIZE;
        else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
                if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
                    TG3_EEPROM_SB_FORMAT_1) {
                        /* Image size depends on the format-1 revision. */
                        switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
                        case TG3_EEPROM_SB_REVISION_0:
                                size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_2:
                                size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_3:
                                size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_4:
                                size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_5:
                                size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
                                break;
                        case TG3_EEPROM_SB_REVISION_6:
                                size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
                                break;
                        default:
                                return -EIO;
                        }
                } else
                        /* Unknown selfboot format: nothing to verify. */
                        return 0;
        } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
                size = NVRAM_SELFBOOT_HW_SIZE;
        else
                return -EIO;

        buf = kmalloc(size, GFP_KERNEL);
        if (buf == NULL)
                return -ENOMEM;

        err = -EIO;
        for (i = 0, j = 0; i < size; i += 4, j++) {
                err = tg3_nvram_read_be32(tp, i, &buf[j]);
                if (err)
                        break;
        }
        if (i < size)
                goto out;

        /* Selfboot format */
        magic = be32_to_cpu(buf[0]);
        if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
            TG3_EEPROM_MAGIC_FW) {
                u8 *buf8 = (u8 *) buf, csum8 = 0;

                if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
                    TG3_EEPROM_SB_REVISION_2) {
                        /* For rev 2, the csum doesn't include the MBA. */
                        for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
                                csum8 += buf8[i];
                        for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
                                csum8 += buf8[i];
                } else {
                        for (i = 0; i < size; i++)
                                csum8 += buf8[i];
                }

                /* A valid image's bytes sum to zero modulo 256. */
                if (csum8 == 0) {
                        err = 0;
                        goto out;
                }

                err = -EIO;
                goto out;
        }

        if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
            TG3_EEPROM_MAGIC_HW) {
                u8 data[NVRAM_SELFBOOT_DATA_SIZE];
                u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
                u8 *buf8 = (u8 *) buf;

                /* Separate the parity bits and the data bytes.  */
                for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
                        if ((i == 0) || (i == 8)) {
                                int l;
                                u8 msk;

                                /* Bytes 0 and 8 each carry 7 parity bits. */
                                for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;
                        } else if (i == 16) {
                                int l;
                                u8 msk;

                                /* Byte 16 carries 6 parity bits and
                                 * byte 17 a further 8.
                                 */
                                for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;

                                for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
                                        parity[k++] = buf8[i] & msk;
                                i++;
                        }
                        data[j++] = buf8[i];
                }

                err = -EIO;
                for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
                        u8 hw8 = hweight8(data[i]);

                        /* Data byte plus its parity bit must have odd
                         * combined parity; either mismatch fails.
                         */
                        if ((hw8 & 0x1) && parity[i])
                                goto out;
                        else if (!(hw8 & 0x1) && !parity[i])
                                goto out;
                }
                err = 0;
                goto out;
        }

        err = -EIO;

        /* Bootstrap checksum at offset 0x10 */
        csum = calc_crc((unsigned char *) buf, 0x10);
        if (csum != le32_to_cpu(buf[0x10/4]))
                goto out;

        /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
        csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
        if (csum != le32_to_cpu(buf[0xfc/4]))
                goto out;

        kfree(buf);

        /* Legacy EEPROM images also carry a VPD block with its own
         * checksum keyword (RV); verify it when present.
         */
        buf = tg3_vpd_readblock(tp, &len);
        if (!buf)
                return -ENOMEM;

        i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
        if (i > 0) {
                j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
                if (j < 0)
                        goto out;

                if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
                        goto out;

                i += PCI_VPD_LRDT_TAG_SIZE;
                j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
                                              PCI_VPD_RO_KEYWORD_CHKSUM);
                if (j > 0) {
                        u8 csum8 = 0;

                        j += PCI_VPD_INFO_FLD_HDR_SIZE;

                        /* Bytes 0..j inclusive must sum to zero. */
                        for (i = 0; i <= j; i++)
                                csum8 += ((u8 *)buf)[i];

                        if (csum8)
                                goto out;
                }
        }

        err = 0;

out:
        kfree(buf);
        return err;
}
11299
/* Link self-test timeouts in seconds; copper autoneg is given longer
 * than serdes to come up.
 */
#define TG3_SERDES_TIMEOUT_SEC  2
#define TG3_COPPER_TIMEOUT_SEC  6
11302
11303 static int tg3_test_link(struct tg3 *tp)
11304 {
11305         int i, max;
11306
11307         if (!netif_running(tp->dev))
11308                 return -ENODEV;
11309
11310         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11311                 max = TG3_SERDES_TIMEOUT_SEC;
11312         else
11313                 max = TG3_COPPER_TIMEOUT_SEC;
11314
11315         for (i = 0; i < max; i++) {
11316                 if (netif_carrier_ok(tp->dev))
11317                         return 0;
11318
11319                 if (msleep_interruptible(1000))
11320                         break;
11321         }
11322
11323         return -EIO;
11324 }
11325
/* Only test the commonly used registers */
/* Register self-test: for each table entry applicable to this chip,
 * write all-zeros and then all-ones through the writable mask and
 * verify that read-only bits are preserved and read/write bits take
 * the written value.  Each register is restored afterwards (including
 * on failure).  Returns 0 on success, -EIO on the first mismatch.
 */
static int tg3_test_registers(struct tg3 *tp)
{
        int i, is_5705, is_5750;
        u32 offset, read_mask, write_mask, val, save_val, read_val;
        static struct {
                u16 offset;
                /* Applicability flags: entries may be limited to (or
                 * excluded from) particular ASIC generations.
                 */
                u16 flags;
#define TG3_FL_5705     0x1
#define TG3_FL_NOT_5705 0x2
#define TG3_FL_NOT_5788 0x4
#define TG3_FL_NOT_5750 0x8
                u32 read_mask;
                u32 write_mask;
        } reg_tbl[] = {
                /* MAC Control Registers */
                { MAC_MODE, TG3_FL_NOT_5705,
                        0x00000000, 0x00ef6f8c },
                { MAC_MODE, TG3_FL_5705,
                        0x00000000, 0x01ef6b8c },
                { MAC_STATUS, TG3_FL_NOT_5705,
                        0x03800107, 0x00000000 },
                { MAC_STATUS, TG3_FL_5705,
                        0x03800100, 0x00000000 },
                { MAC_ADDR_0_HIGH, 0x0000,
                        0x00000000, 0x0000ffff },
                { MAC_ADDR_0_LOW, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_RX_MTU_SIZE, 0x0000,
                        0x00000000, 0x0000ffff },
                { MAC_TX_MODE, 0x0000,
                        0x00000000, 0x00000070 },
                { MAC_TX_LENGTHS, 0x0000,
                        0x00000000, 0x00003fff },
                { MAC_RX_MODE, TG3_FL_NOT_5705,
                        0x00000000, 0x000007fc },
                { MAC_RX_MODE, TG3_FL_5705,
                        0x00000000, 0x000007dc },
                { MAC_HASH_REG_0, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_HASH_REG_1, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_HASH_REG_2, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_HASH_REG_3, 0x0000,
                        0x00000000, 0xffffffff },

                /* Receive Data and Receive BD Initiator Control Registers. */
                { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
                        0x00000000, 0x00000003 },
                { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVDBDI_STD_BD+0, 0x0000,
                        0x00000000, 0xffffffff },
                { RCVDBDI_STD_BD+4, 0x0000,
                        0x00000000, 0xffffffff },
                { RCVDBDI_STD_BD+8, 0x0000,
                        0x00000000, 0xffff0002 },
                { RCVDBDI_STD_BD+0xc, 0x0000,
                        0x00000000, 0xffffffff },

                /* Receive BD Initiator Control Registers. */
                { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVBDI_STD_THRESH, TG3_FL_5705,
                        0x00000000, 0x000003ff },
                { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },

                /* Host Coalescing Control Registers. */
                { HOSTCC_MODE, TG3_FL_NOT_5705,
                        0x00000000, 0x00000004 },
                { HOSTCC_MODE, TG3_FL_5705,
                        0x00000000, 0x000000f6 },
                { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
                        0x00000000, 0x000003ff },
                { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
                        0x00000000, 0x000003ff },
                { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
                        0xffffffff, 0x00000000 },
                { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
                        0xffffffff, 0x00000000 },

                /* Buffer Manager Control Registers. */
                { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
                        0x00000000, 0x007fff80 },
                { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
                        0x00000000, 0x007fffff },
                { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
                        0x00000000, 0x0000003f },
                { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
                        0x00000000, 0x000001ff },
                { BUFMGR_MB_HIGH_WATER, 0x0000,
                        0x00000000, 0x000001ff },
                { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
                        0xffffffff, 0x00000000 },
                { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
                        0xffffffff, 0x00000000 },

                /* Mailbox Registers */
                { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
                        0x00000000, 0x000001ff },
                { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
                        0x00000000, 0x000001ff },
                { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
                        0x00000000, 0x000007ff },
                { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
                        0x00000000, 0x000001ff },

                /* Sentinel terminating the table. */
                { 0xffff, 0x0000, 0x00000000, 0x00000000 },
        };

        is_5705 = is_5750 = 0;
        if (tg3_flag(tp, 5705_PLUS)) {
                is_5705 = 1;
                if (tg3_flag(tp, 5750_PLUS))
                        is_5750 = 1;
        }

        for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
                /* Skip entries that don't apply to this chip family. */
                if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
                        continue;

                if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
                        continue;

                if (tg3_flag(tp, IS_5788) &&
                    (reg_tbl[i].flags & TG3_FL_NOT_5788))
                        continue;

                if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
                        continue;

                offset = (u32) reg_tbl[i].offset;
                read_mask = reg_tbl[i].read_mask;
                write_mask = reg_tbl[i].write_mask;

                /* Save the original register content */
                save_val = tr32(offset);

                /* Determine the read-only value. */
                read_val = save_val & read_mask;

                /* Write zero to the register, then make sure the read-only bits
                 * are not changed and the read/write bits are all zeros.
                 */
                tw32(offset, 0);

                val = tr32(offset);

                /* Test the read-only and read/write bits. */
                if (((val & read_mask) != read_val) || (val & write_mask))
                        goto out;

                /* Write ones to all the bits defined by RdMask and WrMask, then
                 * make sure the read-only bits are not changed and the
                 * read/write bits are all ones.
                 */
                tw32(offset, read_mask | write_mask);

                val = tr32(offset);

                /* Test the read-only bits. */
                if ((val & read_mask) != read_val)
                        goto out;

                /* Test the read/write bits. */
                if ((val & write_mask) != write_mask)
                        goto out;

                tw32(offset, save_val);
        }

        return 0;

out:
        if (netif_msg_hw(tp))
                netdev_err(tp->dev,
                           "Register test failed at offset %x\n", offset);
        tw32(offset, save_val);
        return -EIO;
}
11546
11547 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11548 {
11549         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11550         int i;
11551         u32 j;
11552
11553         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11554                 for (j = 0; j < len; j += 4) {
11555                         u32 val;
11556
11557                         tg3_write_mem(tp, offset + j, test_pattern[i]);
11558                         tg3_read_mem(tp, offset + j, &val);
11559                         if (val != test_pattern[i])
11560                                 return -EIO;
11561                 }
11562         }
11563         return 0;
11564 }
11565
11566 static int tg3_test_memory(struct tg3 *tp)
11567 {
11568         static struct mem_entry {
11569                 u32 offset;
11570                 u32 len;
11571         } mem_tbl_570x[] = {
11572                 { 0x00000000, 0x00b50},
11573                 { 0x00002000, 0x1c000},
11574                 { 0xffffffff, 0x00000}
11575         }, mem_tbl_5705[] = {
11576                 { 0x00000100, 0x0000c},
11577                 { 0x00000200, 0x00008},
11578                 { 0x00004000, 0x00800},
11579                 { 0x00006000, 0x01000},
11580                 { 0x00008000, 0x02000},
11581                 { 0x00010000, 0x0e000},
11582                 { 0xffffffff, 0x00000}
11583         }, mem_tbl_5755[] = {
11584                 { 0x00000200, 0x00008},
11585                 { 0x00004000, 0x00800},
11586                 { 0x00006000, 0x00800},
11587                 { 0x00008000, 0x02000},
11588                 { 0x00010000, 0x0c000},
11589                 { 0xffffffff, 0x00000}
11590         }, mem_tbl_5906[] = {
11591                 { 0x00000200, 0x00008},
11592                 { 0x00004000, 0x00400},
11593                 { 0x00006000, 0x00400},
11594                 { 0x00008000, 0x01000},
11595                 { 0x00010000, 0x01000},
11596                 { 0xffffffff, 0x00000}
11597         }, mem_tbl_5717[] = {
11598                 { 0x00000200, 0x00008},
11599                 { 0x00010000, 0x0a000},
11600                 { 0x00020000, 0x13c00},
11601                 { 0xffffffff, 0x00000}
11602         }, mem_tbl_57765[] = {
11603                 { 0x00000200, 0x00008},
11604                 { 0x00004000, 0x00800},
11605                 { 0x00006000, 0x09800},
11606                 { 0x00010000, 0x0a000},
11607                 { 0xffffffff, 0x00000}
11608         };
11609         struct mem_entry *mem_tbl;
11610         int err = 0;
11611         int i;
11612
11613         if (tg3_flag(tp, 5717_PLUS))
11614                 mem_tbl = mem_tbl_5717;
11615         else if (tg3_flag(tp, 57765_CLASS))
11616                 mem_tbl = mem_tbl_57765;
11617         else if (tg3_flag(tp, 5755_PLUS))
11618                 mem_tbl = mem_tbl_5755;
11619         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11620                 mem_tbl = mem_tbl_5906;
11621         else if (tg3_flag(tp, 5705_PLUS))
11622                 mem_tbl = mem_tbl_5705;
11623         else
11624                 mem_tbl = mem_tbl_570x;
11625
11626         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11627                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11628                 if (err)
11629                         break;
11630         }
11631
11632         return err;
11633 }
11634
/* Parameters for the TSO loopback self-test packet. */
#define TG3_TSO_MSS             500

#define TG3_TSO_IP_HDR_LEN      20
#define TG3_TSO_TCP_HDR_LEN     20
#define TG3_TSO_TCP_OPT_LEN     12

/* Canned header for the TSO loopback test, copied into the test
 * frame after the MAC addresses: a 2-byte Ethernet type field
 * followed by an IPv4 header and a TCP header with options, matching
 * the TG3_TSO_*_LEN values above.  tg3_run_loopback() fills in the IP
 * total-length field at runtime.  NOTE(review): field meanings
 * inferred from the defines and the loopback code - do not edit the
 * bytes without re-deriving the layout.
 */
static const u8 tg3_tso_header[] = {
0x08, 0x00,
0x45, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x00,
0x40, 0x06, 0x00, 0x00,
0x0a, 0x00, 0x00, 0x01,
0x0a, 0x00, 0x00, 0x02,
0x0d, 0x00, 0xe0, 0x00,
0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x02, 0x00,
0x80, 0x10, 0x10, 0x00,
0x14, 0x09, 0x00, 0x00,
0x01, 0x01, 0x08, 0x0a,
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,
};
11657
11658 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11659 {
11660         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11661         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11662         u32 budget;
11663         struct sk_buff *skb;
11664         u8 *tx_data, *rx_data;
11665         dma_addr_t map;
11666         int num_pkts, tx_len, rx_len, i, err;
11667         struct tg3_rx_buffer_desc *desc;
11668         struct tg3_napi *tnapi, *rnapi;
11669         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11670
11671         tnapi = &tp->napi[0];
11672         rnapi = &tp->napi[0];
11673         if (tp->irq_cnt > 1) {
11674                 if (tg3_flag(tp, ENABLE_RSS))
11675                         rnapi = &tp->napi[1];
11676                 if (tg3_flag(tp, ENABLE_TSS))
11677                         tnapi = &tp->napi[1];
11678         }
11679         coal_now = tnapi->coal_now | rnapi->coal_now;
11680
11681         err = -EIO;
11682
11683         tx_len = pktsz;
11684         skb = netdev_alloc_skb(tp->dev, tx_len);
11685         if (!skb)
11686                 return -ENOMEM;
11687
11688         tx_data = skb_put(skb, tx_len);
11689         memcpy(tx_data, tp->dev->dev_addr, 6);
11690         memset(tx_data + 6, 0x0, 8);
11691
11692         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11693
11694         if (tso_loopback) {
11695                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11696
11697                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11698                               TG3_TSO_TCP_OPT_LEN;
11699
11700                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11701                        sizeof(tg3_tso_header));
11702                 mss = TG3_TSO_MSS;
11703
11704                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11705                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11706
11707                 /* Set the total length field in the IP header */
11708                 iph->tot_len = htons((u16)(mss + hdr_len));
11709
11710                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11711                               TXD_FLAG_CPU_POST_DMA);
11712
11713                 if (tg3_flag(tp, HW_TSO_1) ||
11714                     tg3_flag(tp, HW_TSO_2) ||
11715                     tg3_flag(tp, HW_TSO_3)) {
11716                         struct tcphdr *th;
11717                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11718                         th = (struct tcphdr *)&tx_data[val];
11719                         th->check = 0;
11720                 } else
11721                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
11722
11723                 if (tg3_flag(tp, HW_TSO_3)) {
11724                         mss |= (hdr_len & 0xc) << 12;
11725                         if (hdr_len & 0x10)
11726                                 base_flags |= 0x00000010;
11727                         base_flags |= (hdr_len & 0x3e0) << 5;
11728                 } else if (tg3_flag(tp, HW_TSO_2))
11729                         mss |= hdr_len << 9;
11730                 else if (tg3_flag(tp, HW_TSO_1) ||
11731                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11732                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11733                 } else {
11734                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11735                 }
11736
11737                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11738         } else {
11739                 num_pkts = 1;
11740                 data_off = ETH_HLEN;
11741         }
11742
11743         for (i = data_off; i < tx_len; i++)
11744                 tx_data[i] = (u8) (i & 0xff);
11745
11746         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11747         if (pci_dma_mapping_error(tp->pdev, map)) {
11748                 dev_kfree_skb(skb);
11749                 return -EIO;
11750         }
11751
11752         val = tnapi->tx_prod;
11753         tnapi->tx_buffers[val].skb = skb;
11754         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11755
11756         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11757                rnapi->coal_now);
11758
11759         udelay(10);
11760
11761         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11762
11763         budget = tg3_tx_avail(tnapi);
11764         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11765                             base_flags | TXD_FLAG_END, mss, 0)) {
11766                 tnapi->tx_buffers[val].skb = NULL;
11767                 dev_kfree_skb(skb);
11768                 return -EIO;
11769         }
11770
11771         tnapi->tx_prod++;
11772
11773         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11774         tr32_mailbox(tnapi->prodmbox);
11775
11776         udelay(10);
11777
11778         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
11779         for (i = 0; i < 35; i++) {
11780                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11781                        coal_now);
11782
11783                 udelay(10);
11784
11785                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11786                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11787                 if ((tx_idx == tnapi->tx_prod) &&
11788                     (rx_idx == (rx_start_idx + num_pkts)))
11789                         break;
11790         }
11791
11792         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
11793         dev_kfree_skb(skb);
11794
11795         if (tx_idx != tnapi->tx_prod)
11796                 goto out;
11797
11798         if (rx_idx != rx_start_idx + num_pkts)
11799                 goto out;
11800
11801         val = data_off;
11802         while (rx_idx != rx_start_idx) {
11803                 desc = &rnapi->rx_rcb[rx_start_idx++];
11804                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11805                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11806
11807                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11808                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11809                         goto out;
11810
11811                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11812                          - ETH_FCS_LEN;
11813
11814                 if (!tso_loopback) {
11815                         if (rx_len != tx_len)
11816                                 goto out;
11817
11818                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11819                                 if (opaque_key != RXD_OPAQUE_RING_STD)
11820                                         goto out;
11821                         } else {
11822                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11823                                         goto out;
11824                         }
11825                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11826                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11827                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
11828                         goto out;
11829                 }
11830
11831                 if (opaque_key == RXD_OPAQUE_RING_STD) {
11832                         rx_data = tpr->rx_std_buffers[desc_idx].data;
11833                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11834                                              mapping);
11835                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11836                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
11837                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11838                                              mapping);
11839                 } else
11840                         goto out;
11841
11842                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11843                                             PCI_DMA_FROMDEVICE);
11844
11845                 rx_data += TG3_RX_OFFSET(tp);
11846                 for (i = data_off; i < rx_len; i++, val++) {
11847                         if (*(rx_data + i) != (u8) (val & 0xff))
11848                                 goto out;
11849                 }
11850         }
11851
11852         err = 0;
11853
11854         /* tg3_free_rings will unmap and free the rx_data */
11855 out:
11856         return err;
11857 }
11858
11859 #define TG3_STD_LOOPBACK_FAILED         1
11860 #define TG3_JMB_LOOPBACK_FAILED         2
11861 #define TG3_TSO_LOOPBACK_FAILED         4
11862 #define TG3_LOOPBACK_FAILED \
11863         (TG3_STD_LOOPBACK_FAILED | \
11864          TG3_JMB_LOOPBACK_FAILED | \
11865          TG3_TSO_LOOPBACK_FAILED)
11866
11867 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
11868 {
11869         int err = -EIO;
11870         u32 eee_cap;
11871
11872         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11873         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11874
11875         if (!netif_running(tp->dev)) {
11876                 data[0] = TG3_LOOPBACK_FAILED;
11877                 data[1] = TG3_LOOPBACK_FAILED;
11878                 if (do_extlpbk)
11879                         data[2] = TG3_LOOPBACK_FAILED;
11880                 goto done;
11881         }
11882
11883         err = tg3_reset_hw(tp, 1);
11884         if (err) {
11885                 data[0] = TG3_LOOPBACK_FAILED;
11886                 data[1] = TG3_LOOPBACK_FAILED;
11887                 if (do_extlpbk)
11888                         data[2] = TG3_LOOPBACK_FAILED;
11889                 goto done;
11890         }
11891
11892         if (tg3_flag(tp, ENABLE_RSS)) {
11893                 int i;
11894
11895                 /* Reroute all rx packets to the 1st queue */
11896                 for (i = MAC_RSS_INDIR_TBL_0;
11897                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11898                         tw32(i, 0x0);
11899         }
11900
11901         /* HW errata - mac loopback fails in some cases on 5780.
11902          * Normal traffic and PHY loopback are not affected by
11903          * errata.  Also, the MAC loopback test is deprecated for
11904          * all newer ASIC revisions.
11905          */
11906         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
11907             !tg3_flag(tp, CPMU_PRESENT)) {
11908                 tg3_mac_loopback(tp, true);
11909
11910                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11911                         data[0] |= TG3_STD_LOOPBACK_FAILED;
11912
11913                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11914                     tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11915                         data[0] |= TG3_JMB_LOOPBACK_FAILED;
11916
11917                 tg3_mac_loopback(tp, false);
11918         }
11919
11920         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11921             !tg3_flag(tp, USE_PHYLIB)) {
11922                 int i;
11923
11924                 tg3_phy_lpbk_set(tp, 0, false);
11925
11926                 /* Wait for link */
11927                 for (i = 0; i < 100; i++) {
11928                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11929                                 break;
11930                         mdelay(1);
11931                 }
11932
11933                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11934                         data[1] |= TG3_STD_LOOPBACK_FAILED;
11935                 if (tg3_flag(tp, TSO_CAPABLE) &&
11936                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11937                         data[1] |= TG3_TSO_LOOPBACK_FAILED;
11938                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11939                     tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11940                         data[1] |= TG3_JMB_LOOPBACK_FAILED;
11941
11942                 if (do_extlpbk) {
11943                         tg3_phy_lpbk_set(tp, 0, true);
11944
11945                         /* All link indications report up, but the hardware
11946                          * isn't really ready for about 20 msec.  Double it
11947                          * to be sure.
11948                          */
11949                         mdelay(40);
11950
11951                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11952                                 data[2] |= TG3_STD_LOOPBACK_FAILED;
11953                         if (tg3_flag(tp, TSO_CAPABLE) &&
11954                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11955                                 data[2] |= TG3_TSO_LOOPBACK_FAILED;
11956                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11957                             tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11958                                 data[2] |= TG3_JMB_LOOPBACK_FAILED;
11959                 }
11960
11961                 /* Re-enable gphy autopowerdown. */
11962                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11963                         tg3_phy_toggle_apd(tp, true);
11964         }
11965
11966         err = (data[0] | data[1] | data[2]) ? -EIO : 0;
11967
11968 done:
11969         tp->phy_flags |= eee_cap;
11970
11971         return err;
11972 }
11973
/* ethtool self-test handler.
 *
 * Runs the NVRAM, link, register, memory, loopback and interrupt tests
 * and reports per-test results in @data (non-zero entry == failure):
 * data[0] nvram, data[1] link, data[2] registers, data[3] memory,
 * data[4..6] loopback (filled by tg3_test_loopback), data[7] interrupt.
 * ETH_TEST_FL_FAILED is also set in etest->flags on any failure.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

	/* Wake the chip first; if power-up fails, mark every test failed
	 * (memset with 1 makes each u64 entry non-zero).
	 */
	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
	    tg3_power_up(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
		return;
	}

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	/* NOTE(review): the link test is skipped for external loopback,
	 * presumably because link state is not meaningful in that mode.
	 */
	if (!doextlpbk && tg3_test_link(tp)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		/* Quiesce the interface before the disruptive tests. */
		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		/* Only pre-5705 chips have a separate TX CPU to halt. */
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		/* Only drop the NVRAM arbitration we actually acquired. */
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}

		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}

		if (doextlpbk)
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

		/* Loopback results land in data[4], data[5] and data[6]. */
		if (tg3_test_loopback(tp, &data[4], doextlpbk))
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		/* The interrupt test needs interrupts, hence no lock here. */
		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[7] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Reset and restore normal operation. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		/* Restart the PHY only if we stopped it and the HW came back. */
		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	/* Return to low-power state if that is where we started. */
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down(tp);

}
12061
12062 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12063 {
12064         struct mii_ioctl_data *data = if_mii(ifr);
12065         struct tg3 *tp = netdev_priv(dev);
12066         int err;
12067
12068         if (tg3_flag(tp, USE_PHYLIB)) {
12069                 struct phy_device *phydev;
12070                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12071                         return -EAGAIN;
12072                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12073                 return phy_mii_ioctl(phydev, ifr, cmd);
12074         }
12075
12076         switch (cmd) {
12077         case SIOCGMIIPHY:
12078                 data->phy_id = tp->phy_addr;
12079
12080                 /* fallthru */
12081         case SIOCGMIIREG: {
12082                 u32 mii_regval;
12083
12084                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12085                         break;                  /* We have no PHY */
12086
12087                 if (!netif_running(dev))
12088                         return -EAGAIN;
12089
12090                 spin_lock_bh(&tp->lock);
12091                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
12092                 spin_unlock_bh(&tp->lock);
12093
12094                 data->val_out = mii_regval;
12095
12096                 return err;
12097         }
12098
12099         case SIOCSMIIREG:
12100                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12101                         break;                  /* We have no PHY */
12102
12103                 if (!netif_running(dev))
12104                         return -EAGAIN;
12105
12106                 spin_lock_bh(&tp->lock);
12107                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
12108                 spin_unlock_bh(&tp->lock);
12109
12110                 return err;
12111
12112         default:
12113                 /* do nothing */
12114                 break;
12115         }
12116         return -EOPNOTSUPP;
12117 }
12118
12119 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12120 {
12121         struct tg3 *tp = netdev_priv(dev);
12122
12123         memcpy(ec, &tp->coal, sizeof(*ec));
12124         return 0;
12125 }
12126
12127 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12128 {
12129         struct tg3 *tp = netdev_priv(dev);
12130         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12131         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12132
12133         if (!tg3_flag(tp, 5705_PLUS)) {
12134                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12135                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12136                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12137                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
12138         }
12139
12140         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12141             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12142             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12143             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12144             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12145             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12146             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12147             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
12148             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
12149             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
12150                 return -EINVAL;
12151
12152         /* No rx interrupts will be generated if both are zero */
12153         if ((ec->rx_coalesce_usecs == 0) &&
12154             (ec->rx_max_coalesced_frames == 0))
12155                 return -EINVAL;
12156
12157         /* No tx interrupts will be generated if both are zero */
12158         if ((ec->tx_coalesce_usecs == 0) &&
12159             (ec->tx_max_coalesced_frames == 0))
12160                 return -EINVAL;
12161
12162         /* Only copy relevant parameters, ignore all others. */
12163         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
12164         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
12165         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
12166         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
12167         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12168         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12169         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12170         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12171         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12172
12173         if (netif_running(dev)) {
12174                 tg3_full_lock(tp, 0);
12175                 __tg3_set_coalesce(tp, &tp->coal);
12176                 tg3_full_unlock(tp);
12177         }
12178         return 0;
12179 }
12180
/* ethtool entry points for the tg3 driver. */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings           = tg3_get_settings,
	.set_settings           = tg3_set_settings,
	.get_drvinfo            = tg3_get_drvinfo,
	.get_regs_len           = tg3_get_regs_len,
	.get_regs               = tg3_get_regs,
	.get_wol                = tg3_get_wol,
	.set_wol                = tg3_set_wol,
	.get_msglevel           = tg3_get_msglevel,
	.set_msglevel           = tg3_set_msglevel,
	.nway_reset             = tg3_nway_reset,
	.get_link               = ethtool_op_get_link,
	.get_eeprom_len         = tg3_get_eeprom_len,
	.get_eeprom             = tg3_get_eeprom,
	.set_eeprom             = tg3_set_eeprom,
	.get_ringparam          = tg3_get_ringparam,
	.set_ringparam          = tg3_set_ringparam,
	.get_pauseparam         = tg3_get_pauseparam,
	.set_pauseparam         = tg3_set_pauseparam,
	.self_test              = tg3_self_test,
	.get_strings            = tg3_get_strings,
	.set_phys_id            = tg3_set_phys_id,
	.get_ethtool_stats      = tg3_get_ethtool_stats,
	.get_coalesce           = tg3_get_coalesce,
	.set_coalesce           = tg3_set_coalesce,
	.get_sset_count         = tg3_get_sset_count,
	.get_rxnfc              = tg3_get_rxnfc,
	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
	.get_rxfh_indir         = tg3_get_rxfh_indir,
	.set_rxfh_indir         = tg3_set_rxfh_indir,
};
12212
/* ndo_set_rx_mode handler: reprogram the rx filters under the full
 * driver lock, but only while the interface is actually up.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
12224
12225 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
12226                                int new_mtu)
12227 {
12228         dev->mtu = new_mtu;
12229
12230         if (new_mtu > ETH_DATA_LEN) {
12231                 if (tg3_flag(tp, 5780_CLASS)) {
12232                         netdev_update_features(dev);
12233                         tg3_flag_clear(tp, TSO_CAPABLE);
12234                 } else {
12235                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
12236                 }
12237         } else {
12238                 if (tg3_flag(tp, 5780_CLASS)) {
12239                         tg3_flag_set(tp, TSO_CAPABLE);
12240                         netdev_update_features(dev);
12241                 }
12242                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
12243         }
12244 }
12245
/* ndo_change_mtu handler.
 *
 * Validates the requested MTU, then (if the interface is up) stops
 * traffic, resets the chip, applies the new MTU and restarts the
 * hardware.  Returns 0 on success or a negative errno.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	/* Quiesce PHY and traffic before reconfiguring the chip. */
	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	err = tg3_restart_hw(tp, 0);

	/* Only resume traffic if the hardware came back up. */
	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	/* PHY restart must happen outside the full lock. */
	if (!err)
		tg3_phy_start(tp);

	return err;
}
12284
/* Network device entry points for the tg3 driver. */
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open               = tg3_open,
	.ndo_stop               = tg3_close,
	.ndo_start_xmit         = tg3_start_xmit,
	.ndo_get_stats64        = tg3_get_stats64,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_rx_mode        = tg3_set_rx_mode,
	.ndo_set_mac_address    = tg3_set_mac_addr,
	.ndo_do_ioctl           = tg3_ioctl,
	.ndo_tx_timeout         = tg3_tx_timeout,
	.ndo_change_mtu         = tg3_change_mtu,
	.ndo_fix_features       = tg3_fix_features,
	.ndo_set_features       = tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller    = tg3_poll_controller,
#endif
};
12302
12303 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12304 {
12305         u32 cursize, val, magic;
12306
12307         tp->nvram_size = EEPROM_CHIP_SIZE;
12308
12309         if (tg3_nvram_read(tp, 0, &magic) != 0)
12310                 return;
12311
12312         if ((magic != TG3_EEPROM_MAGIC) &&
12313             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12314             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12315                 return;
12316
12317         /*
12318          * Size the chip by reading offsets at increasing powers of two.
12319          * When we encounter our validation signature, we know the addressing
12320          * has wrapped around, and thus have our chip size.
12321          */
12322         cursize = 0x10;
12323
12324         while (cursize < tp->nvram_size) {
12325                 if (tg3_nvram_read(tp, cursize, &val) != 0)
12326                         return;
12327
12328                 if (val == magic)
12329                         break;
12330
12331                 cursize <<= 1;
12332         }
12333
12334         tp->nvram_size = cursize;
12335 }
12336
12337 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12338 {
12339         u32 val;
12340
12341         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12342                 return;
12343
12344         /* Selfboot format */
12345         if (val != TG3_EEPROM_MAGIC) {
12346                 tg3_get_eeprom_size(tp);
12347                 return;
12348         }
12349
12350         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12351                 if (val != 0) {
12352                         /* This is confusing.  We want to operate on the
12353                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
12354                          * call will read from NVRAM and byteswap the data
12355                          * according to the byteswapping settings for all
12356                          * other register accesses.  This ensures the data we
12357                          * want will always reside in the lower 16-bits.
12358                          * However, the data in NVRAM is in LE format, which
12359                          * means the data from the NVRAM read will always be
12360                          * opposite the endianness of the CPU.  The 16-bit
12361                          * byteswap then brings the data to CPU endianness.
12362                          */
12363                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12364                         return;
12365                 }
12366         }
12367         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12368 }
12369
12370 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12371 {
12372         u32 nvcfg1;
12373
12374         nvcfg1 = tr32(NVRAM_CFG1);
12375         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12376                 tg3_flag_set(tp, FLASH);
12377         } else {
12378                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12379                 tw32(NVRAM_CFG1, nvcfg1);
12380         }
12381
12382         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12383             tg3_flag(tp, 5780_CLASS)) {
12384                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12385                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12386                         tp->nvram_jedecnum = JEDEC_ATMEL;
12387                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12388                         tg3_flag_set(tp, NVRAM_BUFFERED);
12389                         break;
12390                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12391                         tp->nvram_jedecnum = JEDEC_ATMEL;
12392                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12393                         break;
12394                 case FLASH_VENDOR_ATMEL_EEPROM:
12395                         tp->nvram_jedecnum = JEDEC_ATMEL;
12396                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12397                         tg3_flag_set(tp, NVRAM_BUFFERED);
12398                         break;
12399                 case FLASH_VENDOR_ST:
12400                         tp->nvram_jedecnum = JEDEC_ST;
12401                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12402                         tg3_flag_set(tp, NVRAM_BUFFERED);
12403                         break;
12404                 case FLASH_VENDOR_SAIFUN:
12405                         tp->nvram_jedecnum = JEDEC_SAIFUN;
12406                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12407                         break;
12408                 case FLASH_VENDOR_SST_SMALL:
12409                 case FLASH_VENDOR_SST_LARGE:
12410                         tp->nvram_jedecnum = JEDEC_SST;
12411                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12412                         break;
12413                 }
12414         } else {
12415                 tp->nvram_jedecnum = JEDEC_ATMEL;
12416                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12417                 tg3_flag_set(tp, NVRAM_BUFFERED);
12418         }
12419 }
12420
12421 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12422 {
12423         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12424         case FLASH_5752PAGE_SIZE_256:
12425                 tp->nvram_pagesize = 256;
12426                 break;
12427         case FLASH_5752PAGE_SIZE_512:
12428                 tp->nvram_pagesize = 512;
12429                 break;
12430         case FLASH_5752PAGE_SIZE_1K:
12431                 tp->nvram_pagesize = 1024;
12432                 break;
12433         case FLASH_5752PAGE_SIZE_2K:
12434                 tp->nvram_pagesize = 2048;
12435                 break;
12436         case FLASH_5752PAGE_SIZE_4K:
12437                 tp->nvram_pagesize = 4096;
12438                 break;
12439         case FLASH_5752PAGE_SIZE_264:
12440                 tp->nvram_pagesize = 264;
12441                 break;
12442         case FLASH_5752PAGE_SIZE_528:
12443                 tp->nvram_pagesize = 528;
12444                 break;
12445         }
12446 }
12447
12448 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12449 {
12450         u32 nvcfg1;
12451
12452         nvcfg1 = tr32(NVRAM_CFG1);
12453
12454         /* NVRAM protection for TPM */
12455         if (nvcfg1 & (1 << 27))
12456                 tg3_flag_set(tp, PROTECTED_NVRAM);
12457
12458         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12459         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12460         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12461                 tp->nvram_jedecnum = JEDEC_ATMEL;
12462                 tg3_flag_set(tp, NVRAM_BUFFERED);
12463                 break;
12464         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12465                 tp->nvram_jedecnum = JEDEC_ATMEL;
12466                 tg3_flag_set(tp, NVRAM_BUFFERED);
12467                 tg3_flag_set(tp, FLASH);
12468                 break;
12469         case FLASH_5752VENDOR_ST_M45PE10:
12470         case FLASH_5752VENDOR_ST_M45PE20:
12471         case FLASH_5752VENDOR_ST_M45PE40:
12472                 tp->nvram_jedecnum = JEDEC_ST;
12473                 tg3_flag_set(tp, NVRAM_BUFFERED);
12474                 tg3_flag_set(tp, FLASH);
12475                 break;
12476         }
12477
12478         if (tg3_flag(tp, FLASH)) {
12479                 tg3_nvram_get_pagesize(tp, nvcfg1);
12480         } else {
12481                 /* For eeprom, set pagesize to maximum eeprom size */
12482                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12483
12484                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12485                 tw32(NVRAM_CFG1, nvcfg1);
12486         }
12487 }
12488
/* Decode NVRAM_CFG1 into NVRAM vendor, page size and total size for
 * 5755-family chips.  When the TPM-protection bit is set only part of
 * the device is accessible, so a smaller effective size is reported.
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		/* Size depends on the exact part and the protect bit. */
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		/* ST parts: protected devices report half the raw size. */
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
12544
/* Identify the NVRAM device behind a 5787/5784/5785 chip from NVRAM_CFG1
 * and record its JEDEC vendor, buffering and page size.  EEPROM parts also
 * get the compat-bypass bit cleared in NVRAM_CFG1.  The usable size is not
 * set here; the caller falls back to tg3_get_nvram_size() when it is 0.
 */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	/* Serial EEPROM parts */
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* Disable the compatibility-bypass path for EEPROM access. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	/* Atmel flash parts (264-byte DataFlash pages) */
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	/* ST M45PE flash parts (256-byte pages) */
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
12582
/* Identify the NVRAM flash part behind a 5761 device from NVRAM_CFG1.
 * All supported parts are buffered flash with 256-byte pages.  When the
 * TPM protection bit is set, the usable size is taken from the
 * NVRAM_ADDR_LOCKOUT register rather than the part's nominal capacity.
 */
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	/* Atmel parts: no NVRAM address translation needed */
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	/* ST M45PE parts */
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		/* Protected: the lockout register holds the accessible size
		 * (presumably programmed by the bootcode — TODO confirm).
		 */
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		/* Unprotected: derive the size from the part number. */
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
12657
12658 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12659 {
12660         tp->nvram_jedecnum = JEDEC_ATMEL;
12661         tg3_flag_set(tp, NVRAM_BUFFERED);
12662         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12663 }
12664
/* Identify the NVRAM device behind a 57780/57765-class chip from
 * NVRAM_CFG1.  EEPROM parts return early after clearing the compat-bypass
 * bit; flash parts fall through to the common page-size probe at the
 * bottom.  Unknown vendor codes mark the device as having no NVRAM.
 */
static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	/* Serial EEPROM parts */
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* Disable the compatibility-bypass path for EEPROM access. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	/* Atmel AT45DB flash parts */
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	/* ST M45PE flash parts */
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	/* Flash parts only: probe the real page size; address translation
	 * is only used with 264/528-byte (DataFlash-style) pages.
	 */
	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
12736
12737
/* Identify the NVRAM device behind a 5717/5719 chip from NVRAM_CFG1.
 * EEPROM parts return early after clearing the compat-bypass bit; flash
 * parts record the size where it is known and fall through to the common
 * page-size probe.  Unknown vendor codes mark the device as NVRAM-less.
 */
static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	/* Serial EEPROM parts */
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* Disable the compatibility-bypass path for EEPROM access. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	/* Atmel flash parts */
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	/* ST flash parts */
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	/* Flash parts only: probe the real page size; address translation
	 * is only used with 264/528-byte pages.
	 */
	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
12815
/* Identify the NVRAM device behind a 5720 chip from the pin-strap field of
 * NVRAM_CFG1.  EEPROM parts return early after clearing the compat-bypass
 * bit; flash parts record their size and fall through to the common
 * page-size probe.  Unknown strap codes mark the device as NVRAM-less.
 */
static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	switch (nvmpinstrp) {
	/* Serial EEPROM parts */
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		/* Disable the compatibility-bypass path for EEPROM access. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	/* Atmel flash parts */
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	/* ST flash parts */
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	/* Flash parts only: probe the real page size; address translation
	 * is only used with 264/528-byte pages.
	 */
	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
12927
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void __devinit tg3_nvram_init(struct tg3 *tp)
{
	/* Reset the EEPROM access state machine and program the default
	 * clock period before touching the NVRAM interface.
	 */
	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		/* Serialize NVRAM access against firmware; bail out if the
		 * hardware lock cannot be obtained.
		 */
		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		/* Per-ASIC probe routines may leave this 0 to request the
		 * generic size detection below.
		 */
		tp->nvram_size = 0;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		/* 5700/5701: plain EEPROM, no NVRAM interface. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
12993
/* One entry of the PCI subsystem-ID -> PHY ID table below.  A phy_id of 0
 * records no PHY override for that board (presumably serdes boards — see
 * the table's users for the exact semantics).
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};
12998
/* Known boards, keyed by PCI subsystem vendor/device ID, with the PHY ID
 * expected on each; searched by tg3_lookup_by_subsys().
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
13062
13063 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13064 {
13065         int i;
13066
13067         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13068                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13069                      tp->pdev->subsystem_vendor) &&
13070                     (subsys_id_to_phy_id[i].subsys_devid ==
13071                      tp->pdev->subsystem_device))
13072                         return &subsys_id_to_phy_id[i];
13073         }
13074         return NULL;
13075 }
13076
13077 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13078 {
13079         u32 val;
13080
13081         tp->phy_id = TG3_PHY_ID_INVALID;
13082         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13083
13084         /* Assume an onboard device and WOL capable by default.  */
13085         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13086         tg3_flag_set(tp, WOL_CAP);
13087
13088         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13089                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13090                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13091                         tg3_flag_set(tp, IS_NIC);
13092                 }
13093                 val = tr32(VCPU_CFGSHDW);
13094                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
13095                         tg3_flag_set(tp, ASPM_WORKAROUND);
13096                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13097                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13098                         tg3_flag_set(tp, WOL_ENABLE);
13099                         device_set_wakeup_enable(&tp->pdev->dev, true);
13100                 }
13101                 goto done;
13102         }
13103
13104         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13105         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13106                 u32 nic_cfg, led_cfg;
13107                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13108                 int eeprom_phy_serdes = 0;
13109
13110                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13111                 tp->nic_sram_data_cfg = nic_cfg;
13112
13113                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13114                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13115                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13116                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13117                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13118                     (ver > 0) && (ver < 0x100))
13119                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13120
13121                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13122                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13123
13124                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13125                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13126                         eeprom_phy_serdes = 1;
13127
13128                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13129                 if (nic_phy_id != 0) {
13130                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13131                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13132
13133                         eeprom_phy_id  = (id1 >> 16) << 10;
13134                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
13135                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
13136                 } else
13137                         eeprom_phy_id = 0;
13138
13139                 tp->phy_id = eeprom_phy_id;
13140                 if (eeprom_phy_serdes) {
13141                         if (!tg3_flag(tp, 5705_PLUS))
13142                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13143                         else
13144                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13145                 }
13146
13147                 if (tg3_flag(tp, 5750_PLUS))
13148                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13149                                     SHASTA_EXT_LED_MODE_MASK);
13150                 else
13151                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13152
13153                 switch (led_cfg) {
13154                 default:
13155                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13156                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13157                         break;
13158
13159                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13160                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13161                         break;
13162
13163                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13164                         tp->led_ctrl = LED_CTRL_MODE_MAC;
13165
13166                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
13167                          * read on some older 5700/5701 bootcode.
13168                          */
13169                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13170                             ASIC_REV_5700 ||
13171                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
13172                             ASIC_REV_5701)
13173                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13174
13175                         break;
13176
13177                 case SHASTA_EXT_LED_SHARED:
13178                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
13179                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13180                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13181                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13182                                                  LED_CTRL_MODE_PHY_2);
13183                         break;
13184
13185                 case SHASTA_EXT_LED_MAC:
13186                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13187                         break;
13188
13189                 case SHASTA_EXT_LED_COMBO:
13190                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
13191                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13192                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13193                                                  LED_CTRL_MODE_PHY_2);
13194                         break;
13195
13196                 }
13197
13198                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13199                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13200                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13201                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13202
13203                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13204                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13205
13206                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13207                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13208                         if ((tp->pdev->subsystem_vendor ==
13209                              PCI_VENDOR_ID_ARIMA) &&
13210                             (tp->pdev->subsystem_device == 0x205a ||
13211                              tp->pdev->subsystem_device == 0x2063))
13212                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13213                 } else {
13214                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13215                         tg3_flag_set(tp, IS_NIC);
13216                 }
13217
13218                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13219                         tg3_flag_set(tp, ENABLE_ASF);
13220                         if (tg3_flag(tp, 5750_PLUS))
13221                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13222                 }
13223
13224                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13225                     tg3_flag(tp, 5750_PLUS))
13226                         tg3_flag_set(tp, ENABLE_APE);
13227
13228                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13229                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13230                         tg3_flag_clear(tp, WOL_CAP);
13231
13232                 if (tg3_flag(tp, WOL_CAP) &&
13233                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13234                         tg3_flag_set(tp, WOL_ENABLE);
13235                         device_set_wakeup_enable(&tp->pdev->dev, true);
13236                 }
13237
13238                 if (cfg2 & (1 << 17))
13239                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13240
13241                 /* serdes signal pre-emphasis in register 0x590 set by */
13242                 /* bootcode if bit 18 is set */
13243                 if (cfg2 & (1 << 18))
13244                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13245
13246                 if ((tg3_flag(tp, 57765_PLUS) ||
13247                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13248                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13249                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13250                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13251
13252                 if (tg3_flag(tp, PCI_EXPRESS) &&
13253                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13254                     !tg3_flag(tp, 57765_PLUS)) {
13255                         u32 cfg3;
13256
13257                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13258                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13259                                 tg3_flag_set(tp, ASPM_WORKAROUND);
13260                 }
13261
13262                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13263                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13264                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13265                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13266                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13267                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13268         }
13269 done:
13270         if (tg3_flag(tp, WOL_CAP))
13271                 device_set_wakeup_enable(&tp->pdev->dev,
13272                                          tg3_flag(tp, WOL_ENABLE));
13273         else
13274                 device_set_wakeup_capable(&tp->pdev->dev, false);
13275 }
13276
13277 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13278 {
13279         int i;
13280         u32 val;
13281
13282         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13283         tw32(OTP_CTRL, cmd);
13284
13285         /* Wait for up to 1 ms for command to execute. */
13286         for (i = 0; i < 100; i++) {
13287                 val = tr32(OTP_STATUS);
13288                 if (val & OTP_STATUS_CMD_DONE)
13289                         break;
13290                 udelay(10);
13291         }
13292
13293         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13294 }
13295
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 *
 * Returns 0 on any OTP command failure, which callers treat as "no OTP
 * configuration available".
 */
static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	/* Route OTP accesses through the GRC register interface. */
	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	/* Top half of the gphy config word. */
	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	/* Bottom half of the gphy config word. */
	bhalf_otp = tr32(OTP_READ_DATA);

	/* Merge: low 16 bits of the first read form the high half, high
	 * 16 bits of the second read form the low half.
	 */
	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
13325
13326 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13327 {
13328         u32 adv = ADVERTISED_Autoneg;
13329
13330         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13331                 adv |= ADVERTISED_1000baseT_Half |
13332                        ADVERTISED_1000baseT_Full;
13333
13334         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13335                 adv |= ADVERTISED_100baseT_Half |
13336                        ADVERTISED_100baseT_Full |
13337                        ADVERTISED_10baseT_Half |
13338                        ADVERTISED_10baseT_Full |
13339                        ADVERTISED_TP;
13340         else
13341                 adv |= ADVERTISED_FIBRE;
13342
13343         tp->link_config.advertising = adv;
13344         tp->link_config.speed = SPEED_INVALID;
13345         tp->link_config.duplex = DUPLEX_INVALID;
13346         tp->link_config.autoneg = AUTONEG_ENABLE;
13347         tp->link_config.active_speed = SPEED_INVALID;
13348         tp->link_config.active_duplex = DUPLEX_INVALID;
13349         tp->link_config.orig_speed = SPEED_INVALID;
13350         tp->link_config.orig_duplex = DUPLEX_INVALID;
13351         tp->link_config.orig_autoneg = AUTONEG_INVALID;
13352 }
13353
/* Identify the PHY attached to the chip and initialise link defaults.
 *
 * Probe order: phylib (if enabled), then the MII PHY ID registers, then
 * a phy_id previously found in EEPROM, then the hardcoded subsystem-ID
 * table.  Returns 0 on success or a negative errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Pack the two MII ID registers into the driver's internal
		 * phy_id layout (note the deliberately asymmetric shifts).
		 */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	/* Chips whose integrated PHY is flagged as EEE-capable. */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
	     (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		/* BMSR is read twice: link status is latched-low per MII
		 * convention, so the second read reflects current state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		/* Restart autoneg only if the advertised config differs
		 * from what the PHY currently has.
		 */
		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		/* NOTE(review): DSP init is deliberately run a second time;
		 * presumably the 5401 needs two passes -- confirm before
		 * restructuring.
		 */
		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
13464
13465 static void __devinit tg3_read_vpd(struct tg3 *tp)
13466 {
13467         u8 *vpd_data;
13468         unsigned int block_end, rosize, len;
13469         u32 vpdlen;
13470         int j, i = 0;
13471
13472         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13473         if (!vpd_data)
13474                 goto out_no_vpd;
13475
13476         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13477         if (i < 0)
13478                 goto out_not_found;
13479
13480         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13481         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13482         i += PCI_VPD_LRDT_TAG_SIZE;
13483
13484         if (block_end > vpdlen)
13485                 goto out_not_found;
13486
13487         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13488                                       PCI_VPD_RO_KEYWORD_MFR_ID);
13489         if (j > 0) {
13490                 len = pci_vpd_info_field_size(&vpd_data[j]);
13491
13492                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13493                 if (j + len > block_end || len != 4 ||
13494                     memcmp(&vpd_data[j], "1028", 4))
13495                         goto partno;
13496
13497                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13498                                               PCI_VPD_RO_KEYWORD_VENDOR0);
13499                 if (j < 0)
13500                         goto partno;
13501
13502                 len = pci_vpd_info_field_size(&vpd_data[j]);
13503
13504                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13505                 if (j + len > block_end)
13506                         goto partno;
13507
13508                 memcpy(tp->fw_ver, &vpd_data[j], len);
13509                 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
13510         }
13511
13512 partno:
13513         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13514                                       PCI_VPD_RO_KEYWORD_PARTNO);
13515         if (i < 0)
13516                 goto out_not_found;
13517
13518         len = pci_vpd_info_field_size(&vpd_data[i]);
13519
13520         i += PCI_VPD_INFO_FLD_HDR_SIZE;
13521         if (len > TG3_BPN_SIZE ||
13522             (len + i) > vpdlen)
13523                 goto out_not_found;
13524
13525         memcpy(tp->board_part_number, &vpd_data[i], len);
13526
13527 out_not_found:
13528         kfree(vpd_data);
13529         if (tp->board_part_number[0])
13530                 return;
13531
13532 out_no_vpd:
13533         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13534                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13535                         strcpy(tp->board_part_number, "BCM5717");
13536                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13537                         strcpy(tp->board_part_number, "BCM5718");
13538                 else
13539                         goto nomatch;
13540         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13541                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13542                         strcpy(tp->board_part_number, "BCM57780");
13543                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13544                         strcpy(tp->board_part_number, "BCM57760");
13545                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13546                         strcpy(tp->board_part_number, "BCM57790");
13547                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13548                         strcpy(tp->board_part_number, "BCM57788");
13549                 else
13550                         goto nomatch;
13551         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13552                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13553                         strcpy(tp->board_part_number, "BCM57761");
13554                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13555                         strcpy(tp->board_part_number, "BCM57765");
13556                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13557                         strcpy(tp->board_part_number, "BCM57781");
13558                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13559                         strcpy(tp->board_part_number, "BCM57785");
13560                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13561                         strcpy(tp->board_part_number, "BCM57791");
13562                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13563                         strcpy(tp->board_part_number, "BCM57795");
13564                 else
13565                         goto nomatch;
13566         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
13567                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
13568                         strcpy(tp->board_part_number, "BCM57762");
13569                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
13570                         strcpy(tp->board_part_number, "BCM57766");
13571                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
13572                         strcpy(tp->board_part_number, "BCM57782");
13573                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13574                         strcpy(tp->board_part_number, "BCM57786");
13575                 else
13576                         goto nomatch;
13577         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13578                 strcpy(tp->board_part_number, "BCM95906");
13579         } else {
13580 nomatch:
13581                 strcpy(tp->board_part_number, "none");
13582         }
13583 }
13584
13585 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13586 {
13587         u32 val;
13588
13589         if (tg3_nvram_read(tp, offset, &val) ||
13590             (val & 0xfc000000) != 0x0c000000 ||
13591             tg3_nvram_read(tp, offset + 4, &val) ||
13592             val != 0)
13593                 return 0;
13594
13595         return 1;
13596 }
13597
/* Extract the bootcode firmware version from NVRAM and append it to
 * tp->fw_ver.  Two image layouts exist: newer images carry an explicit
 * 16-byte version string; older ones only a major/minor word.
 */
static void __devinit tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	/* NVRAM 0xc: pointer to the bootcode image; 0x4: image start. */
	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	/* 0x0c000000 signature plus a zero second word identifies the
	 * newer image format (same check as tg3_fw_img_is_valid()).
	 */
	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	/* Append after whatever (e.g. VPD-derived) version is present. */
	dst_off = strlen(tp->fw_ver);

	if (newver) {
		/* Need room for the full 16-byte version string. */
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		/* ver_offset is relative to the image start address. */
		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
13649
13650 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13651 {
13652         u32 val, major, minor;
13653
13654         /* Use native endian representation */
13655         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13656                 return;
13657
13658         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13659                 TG3_NVM_HWSB_CFG1_MAJSFT;
13660         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13661                 TG3_NVM_HWSB_CFG1_MINSFT;
13662
13663         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13664 }
13665
/* Decode the self-boot firmware version encoded in the EEPROM and
 * append it to tp->fw_ver.  @val is the magic word read from EEPROM
 * offset 0 by the caller.
 */
static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	/* The edition (major/minor/build) word lives at a revision-
	 * dependent offset.
	 */
	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		/* Unknown revision: version string stays just "sb". */
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;

	/* Sanity-bound the decoded fields (build maps to 'a'..'z'). */
	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	/* Builds are reported as a trailing letter: 1 -> 'a', 2 -> 'b'... */
	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
13720
13721 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13722 {
13723         u32 val, offset, start;
13724         int i, vlen;
13725
13726         for (offset = TG3_NVM_DIR_START;
13727              offset < TG3_NVM_DIR_END;
13728              offset += TG3_NVM_DIRENT_SIZE) {
13729                 if (tg3_nvram_read(tp, offset, &val))
13730                         return;
13731
13732                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13733                         break;
13734         }
13735
13736         if (offset == TG3_NVM_DIR_END)
13737                 return;
13738
13739         if (!tg3_flag(tp, 5705_PLUS))
13740                 start = 0x08000000;
13741         else if (tg3_nvram_read(tp, offset - 4, &start))
13742                 return;
13743
13744         if (tg3_nvram_read(tp, offset + 4, &offset) ||
13745             !tg3_fw_img_is_valid(tp, offset) ||
13746             tg3_nvram_read(tp, offset + 8, &val))
13747                 return;
13748
13749         offset += val - start;
13750
13751         vlen = strlen(tp->fw_ver);
13752
13753         tp->fw_ver[vlen++] = ',';
13754         tp->fw_ver[vlen++] = ' ';
13755
13756         for (i = 0; i < 4; i++) {
13757                 __be32 v;
13758                 if (tg3_nvram_read_be32(tp, offset, &v))
13759                         return;
13760
13761                 offset += sizeof(v);
13762
13763                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13764                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13765                         break;
13766                 }
13767
13768                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13769                 vlen += sizeof(v);
13770         }
13771 }
13772
13773 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13774 {
13775         int vlen;
13776         u32 apedata;
13777         char *fwtype;
13778
13779         if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13780                 return;
13781
13782         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13783         if (apedata != APE_SEG_SIG_MAGIC)
13784                 return;
13785
13786         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13787         if (!(apedata & APE_FW_STATUS_READY))
13788                 return;
13789
13790         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13791
13792         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13793                 tg3_flag_set(tp, APE_HAS_NCSI);
13794                 fwtype = "NCSI";
13795         } else {
13796                 fwtype = "DASH";
13797         }
13798
13799         vlen = strlen(tp->fw_ver);
13800
13801         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13802                  fwtype,
13803                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13804                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13805                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13806                  (apedata & APE_FW_VERSION_BLDMSK));
13807 }
13808
13809 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13810 {
13811         u32 val;
13812         bool vpd_vers = false;
13813
13814         if (tp->fw_ver[0] != 0)
13815                 vpd_vers = true;
13816
13817         if (tg3_flag(tp, NO_NVRAM)) {
13818                 strcat(tp->fw_ver, "sb");
13819                 return;
13820         }
13821
13822         if (tg3_nvram_read(tp, 0, &val))
13823                 return;
13824
13825         if (val == TG3_EEPROM_MAGIC)
13826                 tg3_read_bc_ver(tp);
13827         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13828                 tg3_read_sb_ver(tp, val);
13829         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13830                 tg3_read_hwsb_ver(tp);
13831         else
13832                 return;
13833
13834         if (vpd_vers)
13835                 goto done;
13836
13837         if (tg3_flag(tp, ENABLE_APE)) {
13838                 if (tg3_flag(tp, ENABLE_ASF))
13839                         tg3_read_dash_ver(tp);
13840         } else if (tg3_flag(tp, ENABLE_ASF)) {
13841                 tg3_read_mgmtfw_ver(tp);
13842         }
13843
13844 done:
13845         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13846 }
13847
13848 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13849 {
13850         if (tg3_flag(tp, LRG_PROD_RING_CAP))
13851                 return TG3_RX_RET_MAX_SIZE_5717;
13852         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13853                 return TG3_RX_RET_MAX_SIZE_5700;
13854         else
13855                 return TG3_RX_RET_MAX_SIZE_5705;
13856 }
13857
/* Host bridges matched against this table; presumably those known to
 * reorder posted PCI writes, triggering a driver workaround -- confirm
 * at the pci_dev_present() lookup site.
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
13864
13865 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
13866 {
13867         struct pci_dev *peer;
13868         unsigned int func, devnr = tp->pdev->devfn & ~7;
13869
13870         for (func = 0; func < 8; func++) {
13871                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
13872                 if (peer && peer != tp->pdev)
13873                         break;
13874                 pci_dev_put(peer);
13875         }
13876         /* 5704 can be configured in single-port mode, set peer to
13877          * tp->pdev in that case.
13878          */
13879         if (!peer) {
13880                 peer = tp->pdev;
13881                 return peer;
13882         }
13883
13884         /*
13885          * We don't need to keep the refcount elevated; there's no way
13886          * to remove one half of this device without removing the other
13887          */
13888         pci_dev_put(peer);
13889
13890         return peer;
13891 }
13892
13893 static int __devinit tg3_get_invariants(struct tg3 *tp)
13894 {
13895         u32 misc_ctrl_reg;
13896         u32 pci_state_reg, grc_misc_cfg;
13897         u32 val;
13898         u16 pci_cmd;
13899         int err;
13900
13901         /* Force memory write invalidate off.  If we leave it on,
13902          * then on 5700_BX chips we have to enable a workaround.
13903          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13904          * to match the cacheline size.  The Broadcom driver have this
13905          * workaround but turns MWI off all the times so never uses
13906          * it.  This seems to suggest that the workaround is insufficient.
13907          */
13908         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13909         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13910         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13911
13912         /* Important! -- Make sure register accesses are byteswapped
13913          * correctly.  Also, for those chips that require it, make
13914          * sure that indirect register accesses are enabled before
13915          * the first operation.
13916          */
13917         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13918                               &misc_ctrl_reg);
13919         tp->misc_host_ctrl |= (misc_ctrl_reg &
13920                                MISC_HOST_CTRL_CHIPREV);
13921         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13922                                tp->misc_host_ctrl);
13923
13924         tp->pci_chip_rev_id = (misc_ctrl_reg >>
13925                                MISC_HOST_CTRL_CHIPREV_SHIFT);
13926         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13927                 u32 prod_id_asic_rev;
13928
13929                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13930                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13931                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13932                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13933                         pci_read_config_dword(tp->pdev,
13934                                               TG3PCI_GEN2_PRODID_ASICREV,
13935                                               &prod_id_asic_rev);
13936                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13937                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13938                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13939                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13940                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13941                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
13942                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
13943                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
13944                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
13945                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13946                         pci_read_config_dword(tp->pdev,
13947                                               TG3PCI_GEN15_PRODID_ASICREV,
13948                                               &prod_id_asic_rev);
13949                 else
13950                         pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13951                                               &prod_id_asic_rev);
13952
13953                 tp->pci_chip_rev_id = prod_id_asic_rev;
13954         }
13955
13956         /* Wrong chip ID in 5752 A0. This code can be removed later
13957          * as A0 is not in production.
13958          */
13959         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13960                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13961
13962         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13963          * we need to disable memory and use config. cycles
13964          * only to access all registers. The 5702/03 chips
13965          * can mistakenly decode the special cycles from the
13966          * ICH chipsets as memory write cycles, causing corruption
13967          * of register and memory space. Only certain ICH bridges
13968          * will drive special cycles with non-zero data during the
13969          * address phase which can fall within the 5703's address
13970          * range. This is not an ICH bug as the PCI spec allows
13971          * non-zero address during special cycles. However, only
13972          * these ICH bridges are known to drive non-zero addresses
13973          * during special cycles.
13974          *
13975          * Since special cycles do not cross PCI bridges, we only
13976          * enable this workaround if the 5703 is on the secondary
13977          * bus of these ICH bridges.
13978          */
13979         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13980             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13981                 static struct tg3_dev_id {
13982                         u32     vendor;
13983                         u32     device;
13984                         u32     rev;
13985                 } ich_chipsets[] = {
13986                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13987                           PCI_ANY_ID },
13988                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13989                           PCI_ANY_ID },
13990                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13991                           0xa },
13992                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13993                           PCI_ANY_ID },
13994                         { },
13995                 };
13996                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13997                 struct pci_dev *bridge = NULL;
13998
13999                 while (pci_id->vendor != 0) {
14000                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
14001                                                 bridge);
14002                         if (!bridge) {
14003                                 pci_id++;
14004                                 continue;
14005                         }
14006                         if (pci_id->rev != PCI_ANY_ID) {
14007                                 if (bridge->revision > pci_id->rev)
14008                                         continue;
14009                         }
14010                         if (bridge->subordinate &&
14011                             (bridge->subordinate->number ==
14012                              tp->pdev->bus->number)) {
14013                                 tg3_flag_set(tp, ICH_WORKAROUND);
14014                                 pci_dev_put(bridge);
14015                                 break;
14016                         }
14017                 }
14018         }
14019
14020         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14021                 static struct tg3_dev_id {
14022                         u32     vendor;
14023                         u32     device;
14024                 } bridge_chipsets[] = {
14025                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
14026                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
14027                         { },
14028                 };
14029                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14030                 struct pci_dev *bridge = NULL;
14031
14032                 while (pci_id->vendor != 0) {
14033                         bridge = pci_get_device(pci_id->vendor,
14034                                                 pci_id->device,
14035                                                 bridge);
14036                         if (!bridge) {
14037                                 pci_id++;
14038                                 continue;
14039                         }
14040                         if (bridge->subordinate &&
14041                             (bridge->subordinate->number <=
14042                              tp->pdev->bus->number) &&
14043                             (bridge->subordinate->subordinate >=
14044                              tp->pdev->bus->number)) {
14045                                 tg3_flag_set(tp, 5701_DMA_BUG);
14046                                 pci_dev_put(bridge);
14047                                 break;
14048                         }
14049                 }
14050         }
14051
14052         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
14053          * DMA addresses > 40-bit. This bridge may have other additional
14054          * 57xx devices behind it in some 4-port NIC designs for example.
14055          * Any tg3 device found behind the bridge will also need the 40-bit
14056          * DMA workaround.
14057          */
14058         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14059             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14060                 tg3_flag_set(tp, 5780_CLASS);
14061                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14062                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
14063         } else {
14064                 struct pci_dev *bridge = NULL;
14065
14066                 do {
14067                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14068                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
14069                                                 bridge);
14070                         if (bridge && bridge->subordinate &&
14071                             (bridge->subordinate->number <=
14072                              tp->pdev->bus->number) &&
14073                             (bridge->subordinate->subordinate >=
14074                              tp->pdev->bus->number)) {
14075                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14076                                 pci_dev_put(bridge);
14077                                 break;
14078                         }
14079                 } while (bridge);
14080         }
14081
14082         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14083             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14084                 tp->pdev_peer = tg3_find_peer(tp);
14085
14086         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14087             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14088             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14089                 tg3_flag_set(tp, 5717_PLUS);
14090
14091         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
14092             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14093                 tg3_flag_set(tp, 57765_CLASS);
14094
14095         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
14096                 tg3_flag_set(tp, 57765_PLUS);
14097
14098         /* Intentionally exclude ASIC_REV_5906 */
14099         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14100             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14101             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14102             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14103             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14104             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14105             tg3_flag(tp, 57765_PLUS))
14106                 tg3_flag_set(tp, 5755_PLUS);
14107
14108         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14109             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14110             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14111             tg3_flag(tp, 5755_PLUS) ||
14112             tg3_flag(tp, 5780_CLASS))
14113                 tg3_flag_set(tp, 5750_PLUS);
14114
14115         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14116             tg3_flag(tp, 5750_PLUS))
14117                 tg3_flag_set(tp, 5705_PLUS);
14118
14119         /* Determine TSO capabilities */
14120         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
14121                 ; /* Do nothing. HW bug. */
14122         else if (tg3_flag(tp, 57765_PLUS))
14123                 tg3_flag_set(tp, HW_TSO_3);
14124         else if (tg3_flag(tp, 5755_PLUS) ||
14125                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14126                 tg3_flag_set(tp, HW_TSO_2);
14127         else if (tg3_flag(tp, 5750_PLUS)) {
14128                 tg3_flag_set(tp, HW_TSO_1);
14129                 tg3_flag_set(tp, TSO_BUG);
14130                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14131                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
14132                         tg3_flag_clear(tp, TSO_BUG);
14133         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14134                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14135                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
14136                         tg3_flag_set(tp, TSO_BUG);
14137                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14138                         tp->fw_needed = FIRMWARE_TG3TSO5;
14139                 else
14140                         tp->fw_needed = FIRMWARE_TG3TSO;
14141         }
14142
14143         /* Selectively allow TSO based on operating conditions */
14144         if (tg3_flag(tp, HW_TSO_1) ||
14145             tg3_flag(tp, HW_TSO_2) ||
14146             tg3_flag(tp, HW_TSO_3) ||
14147             tp->fw_needed) {
14148                 /* For firmware TSO, assume ASF is disabled.
14149                  * We'll disable TSO later if we discover ASF
14150                  * is enabled in tg3_get_eeprom_hw_cfg().
14151                  */
14152                 tg3_flag_set(tp, TSO_CAPABLE);
14153         } else {
14154                 tg3_flag_clear(tp, TSO_CAPABLE);
14155                 tg3_flag_clear(tp, TSO_BUG);
14156                 tp->fw_needed = NULL;
14157         }
14158
14159         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14160                 tp->fw_needed = FIRMWARE_TG3;
14161
14162         tp->irq_max = 1;
14163
14164         if (tg3_flag(tp, 5750_PLUS)) {
14165                 tg3_flag_set(tp, SUPPORT_MSI);
14166                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14167                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14168                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14169                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14170                      tp->pdev_peer == tp->pdev))
14171                         tg3_flag_clear(tp, SUPPORT_MSI);
14172
14173                 if (tg3_flag(tp, 5755_PLUS) ||
14174                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14175                         tg3_flag_set(tp, 1SHOT_MSI);
14176                 }
14177
14178                 if (tg3_flag(tp, 57765_PLUS)) {
14179                         tg3_flag_set(tp, SUPPORT_MSIX);
14180                         tp->irq_max = TG3_IRQ_MAX_VECS;
14181                         tg3_rss_init_dflt_indir_tbl(tp);
14182                 }
14183         }
14184
14185         if (tg3_flag(tp, 5755_PLUS))
14186                 tg3_flag_set(tp, SHORT_DMA_BUG);
14187
14188         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14189                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
14190
14191         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14192             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14193             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14194                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14195
14196         if (tg3_flag(tp, 57765_PLUS) &&
14197             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14198                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14199
14200         if (!tg3_flag(tp, 5705_PLUS) ||
14201             tg3_flag(tp, 5780_CLASS) ||
14202             tg3_flag(tp, USE_JUMBO_BDFLAG))
14203                 tg3_flag_set(tp, JUMBO_CAPABLE);
14204
14205         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14206                               &pci_state_reg);
14207
14208         if (pci_is_pcie(tp->pdev)) {
14209                 u16 lnkctl;
14210
14211                 tg3_flag_set(tp, PCI_EXPRESS);
14212
14213                 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
14214                         int readrq = pcie_get_readrq(tp->pdev);
14215                         if (readrq > 2048)
14216                                 pcie_set_readrq(tp->pdev, 2048);
14217                 }
14218
14219                 pci_read_config_word(tp->pdev,
14220                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14221                                      &lnkctl);
14222                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14223                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14224                             ASIC_REV_5906) {
14225                                 tg3_flag_clear(tp, HW_TSO_2);
14226                                 tg3_flag_clear(tp, TSO_CAPABLE);
14227                         }
14228                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14229                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14230                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14231                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14232                                 tg3_flag_set(tp, CLKREQ_BUG);
14233                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14234                         tg3_flag_set(tp, L1PLLPD_EN);
14235                 }
14236         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14237                 /* BCM5785 devices are effectively PCIe devices, and should
14238                  * follow PCIe codepaths, but do not have a PCIe capabilities
14239                  * section.
14240                  */
14241                 tg3_flag_set(tp, PCI_EXPRESS);
14242         } else if (!tg3_flag(tp, 5705_PLUS) ||
14243                    tg3_flag(tp, 5780_CLASS)) {
14244                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14245                 if (!tp->pcix_cap) {
14246                         dev_err(&tp->pdev->dev,
14247                                 "Cannot find PCI-X capability, aborting\n");
14248                         return -EIO;
14249                 }
14250
14251                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14252                         tg3_flag_set(tp, PCIX_MODE);
14253         }
14254
14255         /* If we have an AMD 762 or VIA K8T800 chipset, write
14256          * reordering to the mailbox registers done by the host
14257          * controller can cause major troubles.  We read back from
14258          * every mailbox register write to force the writes to be
14259          * posted to the chip in order.
14260          */
14261         if (pci_dev_present(tg3_write_reorder_chipsets) &&
14262             !tg3_flag(tp, PCI_EXPRESS))
14263                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14264
14265         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14266                              &tp->pci_cacheline_sz);
14267         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14268                              &tp->pci_lat_timer);
14269         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14270             tp->pci_lat_timer < 64) {
14271                 tp->pci_lat_timer = 64;
14272                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14273                                       tp->pci_lat_timer);
14274         }
14275
14276         /* Important! -- It is critical that the PCI-X hw workaround
14277          * situation is decided before the first MMIO register access.
14278          */
14279         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14280                 /* 5700 BX chips need to have their TX producer index
14281                  * mailboxes written twice to workaround a bug.
14282                  */
14283                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14284
14285                 /* If we are in PCI-X mode, enable register write workaround.
14286                  *
14287                  * The workaround is to use indirect register accesses
14288                  * for all chip writes not to mailbox registers.
14289                  */
14290                 if (tg3_flag(tp, PCIX_MODE)) {
14291                         u32 pm_reg;
14292
14293                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14294
14295                         /* The chip can have its power management PCI config
14296                          * space registers clobbered due to this bug.
14297                          * So explicitly force the chip into D0 here.
14298                          */
14299                         pci_read_config_dword(tp->pdev,
14300                                               tp->pm_cap + PCI_PM_CTRL,
14301                                               &pm_reg);
14302                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14303                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14304                         pci_write_config_dword(tp->pdev,
14305                                                tp->pm_cap + PCI_PM_CTRL,
14306                                                pm_reg);
14307
14308                         /* Also, force SERR#/PERR# in PCI command. */
14309                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14310                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14311                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14312                 }
14313         }
14314
14315         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14316                 tg3_flag_set(tp, PCI_HIGH_SPEED);
14317         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14318                 tg3_flag_set(tp, PCI_32BIT);
14319
14320         /* Chip-specific fixup from Broadcom driver */
14321         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14322             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14323                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14324                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14325         }
14326
14327         /* Default fast path register access methods */
14328         tp->read32 = tg3_read32;
14329         tp->write32 = tg3_write32;
14330         tp->read32_mbox = tg3_read32;
14331         tp->write32_mbox = tg3_write32;
14332         tp->write32_tx_mbox = tg3_write32;
14333         tp->write32_rx_mbox = tg3_write32;
14334
14335         /* Various workaround register access methods */
14336         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14337                 tp->write32 = tg3_write_indirect_reg32;
14338         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14339                  (tg3_flag(tp, PCI_EXPRESS) &&
14340                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14341                 /*
14342                  * Back to back register writes can cause problems on these
14343                  * chips, the workaround is to read back all reg writes
14344                  * except those to mailbox regs.
14345                  *
14346                  * See tg3_write_indirect_reg32().
14347                  */
14348                 tp->write32 = tg3_write_flush_reg32;
14349         }
14350
14351         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14352                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14353                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14354                         tp->write32_rx_mbox = tg3_write_flush_reg32;
14355         }
14356
14357         if (tg3_flag(tp, ICH_WORKAROUND)) {
14358                 tp->read32 = tg3_read_indirect_reg32;
14359                 tp->write32 = tg3_write_indirect_reg32;
14360                 tp->read32_mbox = tg3_read_indirect_mbox;
14361                 tp->write32_mbox = tg3_write_indirect_mbox;
14362                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14363                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14364
14365                 iounmap(tp->regs);
14366                 tp->regs = NULL;
14367
14368                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14369                 pci_cmd &= ~PCI_COMMAND_MEMORY;
14370                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14371         }
14372         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14373                 tp->read32_mbox = tg3_read32_mbox_5906;
14374                 tp->write32_mbox = tg3_write32_mbox_5906;
14375                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14376                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14377         }
14378
14379         if (tp->write32 == tg3_write_indirect_reg32 ||
14380             (tg3_flag(tp, PCIX_MODE) &&
14381              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14382               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14383                 tg3_flag_set(tp, SRAM_USE_CONFIG);
14384
14385         /* The memory arbiter has to be enabled in order for SRAM accesses
14386          * to succeed.  Normally on powerup the tg3 chip firmware will make
14387          * sure it is enabled, but other entities such as system netboot
14388          * code might disable it.
14389          */
14390         val = tr32(MEMARB_MODE);
14391         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14392
14393         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14394         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14395             tg3_flag(tp, 5780_CLASS)) {
14396                 if (tg3_flag(tp, PCIX_MODE)) {
14397                         pci_read_config_dword(tp->pdev,
14398                                               tp->pcix_cap + PCI_X_STATUS,
14399                                               &val);
14400                         tp->pci_fn = val & 0x7;
14401                 }
14402         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14403                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14404                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14405                     NIC_SRAM_CPMUSTAT_SIG) {
14406                         tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14407                         tp->pci_fn = tp->pci_fn ? 1 : 0;
14408                 }
14409         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14410                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14411                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14412                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14413                     NIC_SRAM_CPMUSTAT_SIG) {
14414                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14415                                      TG3_CPMU_STATUS_FSHFT_5719;
14416                 }
14417         }
14418
14419         /* Get eeprom hw config before calling tg3_set_power_state().
14420          * In particular, the TG3_FLAG_IS_NIC flag must be
14421          * determined before calling tg3_set_power_state() so that
14422          * we know whether or not to switch out of Vaux power.
14423          * When the flag is set, it means that GPIO1 is used for eeprom
14424          * write protect and also implies that it is a LOM where GPIOs
14425          * are not used to switch power.
14426          */
14427         tg3_get_eeprom_hw_cfg(tp);
14428
14429         if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14430                 tg3_flag_clear(tp, TSO_CAPABLE);
14431                 tg3_flag_clear(tp, TSO_BUG);
14432                 tp->fw_needed = NULL;
14433         }
14434
14435         if (tg3_flag(tp, ENABLE_APE)) {
14436                 /* Allow reads and writes to the
14437                  * APE register and memory space.
14438                  */
14439                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14440                                  PCISTATE_ALLOW_APE_SHMEM_WR |
14441                                  PCISTATE_ALLOW_APE_PSPACE_WR;
14442                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14443                                        pci_state_reg);
14444
14445                 tg3_ape_lock_init(tp);
14446         }
14447
14448         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14449             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14450             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14451             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14452             tg3_flag(tp, 57765_PLUS))
14453                 tg3_flag_set(tp, CPMU_PRESENT);
14454
14455         /* Set up tp->grc_local_ctrl before calling
14456          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14457          * will bring 5700's external PHY out of reset.
14458          * It is also used as eeprom write protect on LOMs.
14459          */
14460         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14461         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14462             tg3_flag(tp, EEPROM_WRITE_PROT))
14463                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14464                                        GRC_LCLCTRL_GPIO_OUTPUT1);
14465         /* Unused GPIO3 must be driven as output on 5752 because there
14466          * are no pull-up resistors on unused GPIO pins.
14467          */
14468         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14469                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14470
14471         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14472             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14473             tg3_flag(tp, 57765_CLASS))
14474                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14475
14476         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14477             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14478                 /* Turn off the debug UART. */
14479                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14480                 if (tg3_flag(tp, IS_NIC))
14481                         /* Keep VMain power. */
14482                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14483                                               GRC_LCLCTRL_GPIO_OUTPUT0;
14484         }
14485
14486         /* Switch out of Vaux if it is a NIC */
14487         tg3_pwrsrc_switch_to_vmain(tp);
14488
14489         /* Derive initial jumbo mode from MTU assigned in
14490          * ether_setup() via the alloc_etherdev() call
14491          */
14492         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14493                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14494
14495         /* Determine WakeOnLan speed to use. */
14496         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14497             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14498             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14499             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14500                 tg3_flag_clear(tp, WOL_SPEED_100MB);
14501         } else {
14502                 tg3_flag_set(tp, WOL_SPEED_100MB);
14503         }
14504
14505         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14506                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14507
14508         /* A few boards don't want Ethernet@WireSpeed phy feature */
14509         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14510             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14511              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14512              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14513             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14514             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14515                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14516
14517         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14518             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14519                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14520         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14521                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14522
14523         if (tg3_flag(tp, 5705_PLUS) &&
14524             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14525             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14526             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14527             !tg3_flag(tp, 57765_PLUS)) {
14528                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14529                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14530                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14531                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14532                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14533                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14534                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14535                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14536                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14537                 } else
14538                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14539         }
14540
14541         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14542             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14543                 tp->phy_otp = tg3_read_otp_phycfg(tp);
14544                 if (tp->phy_otp == 0)
14545                         tp->phy_otp = TG3_OTP_DEFAULT;
14546         }
14547
14548         if (tg3_flag(tp, CPMU_PRESENT))
14549                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14550         else
14551                 tp->mi_mode = MAC_MI_MODE_BASE;
14552
14553         tp->coalesce_mode = 0;
14554         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14555             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14556                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14557
14558         /* Set these bits to enable statistics workaround. */
14559         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14560             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14561             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14562                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14563                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14564         }
14565
14566         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14567             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14568                 tg3_flag_set(tp, USE_PHYLIB);
14569
14570         err = tg3_mdio_init(tp);
14571         if (err)
14572                 return err;
14573
14574         /* Initialize data/descriptor byte/word swapping. */
14575         val = tr32(GRC_MODE);
14576         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14577                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14578                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
14579                         GRC_MODE_B2HRX_ENABLE |
14580                         GRC_MODE_HTX2B_ENABLE |
14581                         GRC_MODE_HOST_STACKUP);
14582         else
14583                 val &= GRC_MODE_HOST_STACKUP;
14584
14585         tw32(GRC_MODE, val | tp->grc_mode);
14586
14587         tg3_switch_clocks(tp);
14588
14589         /* Clear this out for sanity. */
14590         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14591
14592         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14593                               &pci_state_reg);
14594         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14595             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14596                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14597
14598                 if (chiprevid == CHIPREV_ID_5701_A0 ||
14599                     chiprevid == CHIPREV_ID_5701_B0 ||
14600                     chiprevid == CHIPREV_ID_5701_B2 ||
14601                     chiprevid == CHIPREV_ID_5701_B5) {
14602                         void __iomem *sram_base;
14603
14604                         /* Write some dummy words into the SRAM status block
14605                          * area, see if it reads back correctly.  If the return
14606                          * value is bad, force enable the PCIX workaround.
14607                          */
14608                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14609
14610                         writel(0x00000000, sram_base);
14611                         writel(0x00000000, sram_base + 4);
14612                         writel(0xffffffff, sram_base + 4);
14613                         if (readl(sram_base) != 0x00000000)
14614                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14615                 }
14616         }
14617
14618         udelay(50);
14619         tg3_nvram_init(tp);
14620
14621         grc_misc_cfg = tr32(GRC_MISC_CFG);
14622         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14623
14624         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14625             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14626              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14627                 tg3_flag_set(tp, IS_5788);
14628
14629         if (!tg3_flag(tp, IS_5788) &&
14630             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14631                 tg3_flag_set(tp, TAGGED_STATUS);
14632         if (tg3_flag(tp, TAGGED_STATUS)) {
14633                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14634                                       HOSTCC_MODE_CLRTICK_TXBD);
14635
14636                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14637                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14638                                        tp->misc_host_ctrl);
14639         }
14640
14641         /* Preserve the APE MAC_MODE bits */
14642         if (tg3_flag(tp, ENABLE_APE))
14643                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14644         else
14645                 tp->mac_mode = 0;
14646
14647         /* these are limited to 10/100 only */
14648         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14649              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14650             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14651              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14652              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14653               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14654               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14655             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14656              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14657               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14658               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14659             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14660             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14661             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14662             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14663                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14664
14665         err = tg3_phy_probe(tp);
14666         if (err) {
14667                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14668                 /* ... but do not return immediately ... */
14669                 tg3_mdio_fini(tp);
14670         }
14671
14672         tg3_read_vpd(tp);
14673         tg3_read_fw_ver(tp);
14674
14675         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14676                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14677         } else {
14678                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14679                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14680                 else
14681                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14682         }
14683
14684         /* 5700 {AX,BX} chips have a broken status block link
14685          * change bit implementation, so we must use the
14686          * status register in those cases.
14687          */
14688         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14689                 tg3_flag_set(tp, USE_LINKCHG_REG);
14690         else
14691                 tg3_flag_clear(tp, USE_LINKCHG_REG);
14692
14693         /* The led_ctrl is set during tg3_phy_probe, here we might
14694          * have to force the link status polling mechanism based
14695          * upon subsystem IDs.
14696          */
14697         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14698             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14699             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14700                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14701                 tg3_flag_set(tp, USE_LINKCHG_REG);
14702         }
14703
14704         /* For all SERDES we poll the MAC status register. */
14705         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14706                 tg3_flag_set(tp, POLL_SERDES);
14707         else
14708                 tg3_flag_clear(tp, POLL_SERDES);
14709
14710         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
14711         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14712         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14713             tg3_flag(tp, PCIX_MODE)) {
14714                 tp->rx_offset = NET_SKB_PAD;
14715 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14716                 tp->rx_copy_thresh = ~(u16)0;
14717 #endif
14718         }
14719
14720         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14721         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14722         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14723
14724         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14725
14726         /* Increment the rx prod index on the rx std ring by at most
14727          * 8 for these chips to workaround hw errata.
14728          */
14729         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14730             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14731             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14732                 tp->rx_std_max_post = 8;
14733
14734         if (tg3_flag(tp, ASPM_WORKAROUND))
14735                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14736                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
14737
14738         return err;
14739 }
14740
#ifdef CONFIG_SPARC
/* Fetch the MAC address from the OpenFirmware "local-mac-address"
 * property of this PCI device's device-tree node.
 *
 * Returns 0 and fills dev->dev_addr/dev->perm_addr on success,
 * -ENODEV if the property is absent or not exactly ETH_ALEN bytes.
 */
static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	struct pci_dev *pdev = tp->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const unsigned char *addr;
	int len;

	addr = of_get_property(dp, "local-mac-address", &len);
	/* Use ETH_ALEN instead of a magic 6 for the MAC address length. */
	if (addr && len == ETH_ALEN) {
		memcpy(dev->dev_addr, addr, ETH_ALEN);
		memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
		return 0;
	}
	return -ENODEV;
}

/* Last-resort fallback: use the system-wide MAC address from the
 * machine IDPROM.  Always succeeds.
 */
static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;

	memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
	memcpy(dev->perm_addr, idprom->id_ethaddr, ETH_ALEN);
	return 0;
}
#endif
14768
/* Determine the device MAC address, trying sources in decreasing order
 * of preference:
 *   1. OpenFirmware property (sparc only),
 *   2. the bootcode's SRAM MAC address mailbox,
 *   3. NVRAM at a chip-dependent offset,
 *   4. the live MAC_ADDR_0 registers,
 *   5. the machine IDPROM (sparc only).
 *
 * Returns 0 on success (dev->dev_addr and dev->perm_addr set),
 * -EINVAL if no valid address could be found.
 */
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	/* Default NVRAM offset of the MAC address. */
	mac_offset = 0x7c;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		/* Dual-MAC parts keep the second port's address at 0xcc. */
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		/* NOTE(review): on lock failure the NVRAM state machine is
		 * reset, on success the lock is dropped immediately —
		 * presumably tg3_nvram_lock() returns nonzero on failure;
		 * confirm against its definition.
		 */
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		/* 5717+ devices: per-function address slots in NVRAM. */
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	/* 0x484b is the bootcode's "HK" signature marking a valid mailbox. */
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			/* hi holds 2 address bytes in its low half,
			 * lo holds the remaining 4 (big-endian reads).
			 */
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
14844
/* Host-bridge-friendliness goals for DMA burst boundaries. */
#define BOUNDARY_SINGLE_CACHELINE       1
#define BOUNDARY_MULTI_CACHELINE        2

/* Fold DMA read/write boundary settings appropriate for the host's
 * PCI cache line size into @val (an image of the DMA_RWCTRL register)
 * and return the result.
 *
 * The boundary goal is chosen per architecture at compile time; the
 * encoding of the boundary bits differs between plain PCI, PCI-X and
 * PCI Express, hence the three switch tables below.
 */
static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	/* Cache line size register is in 32-bit words; 0 means
	 * "unspecified", treated as the 1024-byte maximum.
	 */
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	/* 57765+ chips only expose a single cache-alignment disable bit. */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		/* PCI-X: boundary granularity is 128/256/384 bytes. */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		/* PCI-E: only write-side boundary control is available. */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		/* Plain PCI (5700/5701): boundary tracks the cache line
		 * size directly; the fallthroughs implement "use the
		 * smallest boundary >= cacheline_size" for the
		 * multi-cacheline goal.
		 */
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
14988
/* Run a single raw DMA transfer of @size bytes between the host buffer
 * at @buf_dma and NIC SRAM (mbuf 0x2100), in the direction selected by
 * @to_device, by hand-feeding one internal descriptor to the chip's
 * read/write DMA engine via the FTQ.
 *
 * Returns 0 if the completion appears within ~4 ms of polling,
 * -ENODEV otherwise.  Used only by tg3_test_dma().
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce the DMA completion FIFOs and status before the test. */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		/* Host -> NIC: use the read DMA engine. */
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		/* NIC -> host: use the write DMA engine. */
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM word by word through the
	 * PCI memory window.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Enqueue the descriptor to kick off the transfer. */
	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	/* Poll for the completion to show up, up to 40 * 100us. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
15068
/* Size of the coherent DMA test buffer used by tg3_test_dma(). */
#define TEST_BUFFER_SIZE        0x2000

/* Host bridges known to expose the 5700/5701 write DMA problem even
 * though the DMA loop-back test itself passes; for these the write
 * boundary is forced to 16 bytes regardless of the test outcome.
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};
15075
/* Compute and program tp->dma_rwctrl (the DMA read/write control
 * register) for this chip/bus combination, then — on 5700/5701 only —
 * run a host<->NIC DMA round-trip test to detect the write DMA bug and
 * tighten the write boundary to 16 bytes if corruption is observed.
 *
 * Returns 0 on success, -ENOMEM if the test buffer cannot be
 * allocated, or -ENODEV if DMA fails even with the workaround.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Base PCI read/write command codes, then fold in the
	 * architecture/bus dependent boundary bits.
	 */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	/* Bus-specific watermark settings; the magic constants below are
	 * raw DMA_RWCTRL field values for each chip family.
	 */
	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	/* 5703/5704: clear the low nibble of the register image. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants.  */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 need the round-trip DMA test below. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	/* Loop: write a known pattern to the NIC, read it back, and
	 * verify.  On corruption, retry once with the 16-byte write
	 * boundary workaround before giving up.
	 */
	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				/* First corruption: apply the 16-byte
				 * boundary workaround and rerun the loop.
				 */
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
15265
15266 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15267 {
15268         if (tg3_flag(tp, 57765_PLUS)) {
15269                 tp->bufmgr_config.mbuf_read_dma_low_water =
15270                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15271                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15272                         DEFAULT_MB_MACRX_LOW_WATER_57765;
15273                 tp->bufmgr_config.mbuf_high_water =
15274                         DEFAULT_MB_HIGH_WATER_57765;
15275
15276                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15277                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15278                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15279                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15280                 tp->bufmgr_config.mbuf_high_water_jumbo =
15281                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15282         } else if (tg3_flag(tp, 5705_PLUS)) {
15283                 tp->bufmgr_config.mbuf_read_dma_low_water =
15284                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15285                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15286                         DEFAULT_MB_MACRX_LOW_WATER_5705;
15287                 tp->bufmgr_config.mbuf_high_water =
15288                         DEFAULT_MB_HIGH_WATER_5705;
15289                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15290                         tp->bufmgr_config.mbuf_mac_rx_low_water =
15291                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
15292                         tp->bufmgr_config.mbuf_high_water =
15293                                 DEFAULT_MB_HIGH_WATER_5906;
15294                 }
15295
15296                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15297                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15298                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15299                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15300                 tp->bufmgr_config.mbuf_high_water_jumbo =
15301                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15302         } else {
15303                 tp->bufmgr_config.mbuf_read_dma_low_water =
15304                         DEFAULT_MB_RDMA_LOW_WATER;
15305                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15306                         DEFAULT_MB_MACRX_LOW_WATER;
15307                 tp->bufmgr_config.mbuf_high_water =
15308                         DEFAULT_MB_HIGH_WATER;
15309
15310                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15311                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15312                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15313                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15314                 tp->bufmgr_config.mbuf_high_water_jumbo =
15315                         DEFAULT_MB_HIGH_WATER_JUMBO;
15316         }
15317
15318         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15319         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15320 }
15321
15322 static char * __devinit tg3_phy_string(struct tg3 *tp)
15323 {
15324         switch (tp->phy_id & TG3_PHY_ID_MASK) {
15325         case TG3_PHY_ID_BCM5400:        return "5400";
15326         case TG3_PHY_ID_BCM5401:        return "5401";
15327         case TG3_PHY_ID_BCM5411:        return "5411";
15328         case TG3_PHY_ID_BCM5701:        return "5701";
15329         case TG3_PHY_ID_BCM5703:        return "5703";
15330         case TG3_PHY_ID_BCM5704:        return "5704";
15331         case TG3_PHY_ID_BCM5705:        return "5705";
15332         case TG3_PHY_ID_BCM5750:        return "5750";
15333         case TG3_PHY_ID_BCM5752:        return "5752";
15334         case TG3_PHY_ID_BCM5714:        return "5714";
15335         case TG3_PHY_ID_BCM5780:        return "5780";
15336         case TG3_PHY_ID_BCM5755:        return "5755";
15337         case TG3_PHY_ID_BCM5787:        return "5787";
15338         case TG3_PHY_ID_BCM5784:        return "5784";
15339         case TG3_PHY_ID_BCM5756:        return "5722/5756";
15340         case TG3_PHY_ID_BCM5906:        return "5906";
15341         case TG3_PHY_ID_BCM5761:        return "5761";
15342         case TG3_PHY_ID_BCM5718C:       return "5718C";
15343         case TG3_PHY_ID_BCM5718S:       return "5718S";
15344         case TG3_PHY_ID_BCM57765:       return "57765";
15345         case TG3_PHY_ID_BCM5719C:       return "5719C";
15346         case TG3_PHY_ID_BCM5720C:       return "5720C";
15347         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
15348         case 0:                 return "serdes";
15349         default:                return "unknown";
15350         }
15351 }
15352
15353 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15354 {
15355         if (tg3_flag(tp, PCI_EXPRESS)) {
15356                 strcpy(str, "PCI Express");
15357                 return str;
15358         } else if (tg3_flag(tp, PCIX_MODE)) {
15359                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15360
15361                 strcpy(str, "PCIX:");
15362
15363                 if ((clock_ctrl == 7) ||
15364                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15365                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15366                         strcat(str, "133MHz");
15367                 else if (clock_ctrl == 0)
15368                         strcat(str, "33MHz");
15369                 else if (clock_ctrl == 2)
15370                         strcat(str, "50MHz");
15371                 else if (clock_ctrl == 4)
15372                         strcat(str, "66MHz");
15373                 else if (clock_ctrl == 6)
15374                         strcat(str, "100MHz");
15375         } else {
15376                 strcpy(str, "PCI:");
15377                 if (tg3_flag(tp, PCI_HIGH_SPEED))
15378                         strcat(str, "66MHz");
15379                 else
15380                         strcat(str, "33MHz");
15381         }
15382         if (tg3_flag(tp, PCI_32BIT))
15383                 strcat(str, ":32-bit");
15384         else
15385                 strcat(str, ":64-bit");
15386         return str;
15387 }
15388
15389 static void __devinit tg3_init_coal(struct tg3 *tp)
15390 {
15391         struct ethtool_coalesce *ec = &tp->coal;
15392
15393         memset(ec, 0, sizeof(*ec));
15394         ec->cmd = ETHTOOL_GCOALESCE;
15395         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15396         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15397         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15398         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15399         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15400         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15401         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15402         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15403         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15404
15405         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15406                                  HOSTCC_MODE_CLRTICK_TXBD)) {
15407                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15408                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15409                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15410                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15411         }
15412
15413         if (tg3_flag(tp, 5705_PLUS)) {
15414                 ec->rx_coalesce_usecs_irq = 0;
15415                 ec->tx_coalesce_usecs_irq = 0;
15416                 ec->stats_block_coalesce_usecs = 0;
15417         }
15418 }
15419
/* tg3_init_one - PCI probe routine.
 *
 * Enables the PCI device, maps the register (and, for APE-capable
 * chips, APE) BARs, reads chip invariants, configures DMA masks and
 * netdev feature flags, performs a DMA engine self-test, and finally
 * registers the net device.  Returns 0 on success or a negative errno;
 * on failure, resources are released via the err_out_* labels in
 * reverse order of acquisition.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err, pm_cap;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	netdev_features_t features = 0;

	printk_once(KERN_INFO "%s\n", version);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find Power Management capability, aborting\n");
		err = -EIO;
		goto err_out_free_res;
	}

	err = pci_set_power_state(pdev, PCI_D0);
	if (err) {
		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
		goto err_out_free_res;
	}

	/* Allocate the netdev with room for the full multi-queue vector set;
	 * the actual number of queues used is decided later.
	 */
	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_power_down;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	/* These device ids carry an APE (Application Processing Engine)
	 * whose registers live behind BAR 2.
	 */
	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
		tg3_flag_set(tp, ENABLE_APE);
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			dev_err(&pdev->dev,
				"Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->netdev_ops = &tg3_netdev_ops;
	dev->irq = pdev->irq;

	err = tg3_get_invariants(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}

	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tg3_flag(tp, IS_5788))
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
	/* Fall back to a 32-bit mask if the wider mask was refused. */
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	/* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if ((tg3_flag(tp, HW_TSO_1) ||
	     tg3_flag(tp, HW_TSO_2) ||
	     tg3_flag(tp, HW_TSO_3)) &&
	    (features & NETIF_F_IP_CSUM))
		features |= NETIF_F_TSO;
	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
		if (features & NETIF_F_IPV6_CSUM)
			features |= NETIF_F_TSO6;
		if (tg3_flag(tp, HW_TSO_3) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}

	dev->features |= features;
	dev->vlan_features |= features;

	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
	 * loopback for the remaining devices.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
	    !tg3_flag(tp, CPMU_PRESENT))
		/* Add the loopback capability */
		features |= NETIF_F_LOOPBACK;

	dev->hw_features |= features;

	/* 5705 A1 without TSO on a slow bus gets a shortened rx ring. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !tg3_flag(tp, TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}

	/* Assign interrupt, rx-return-consumer and tx-producer mailbox
	 * registers to each NAPI context.
	 */
	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
	for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;

		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;

		/*
		 * If we support MSIX, we'll be using RSS.  If we're using
		 * RSS, the first vector only handles link interrupts and the
		 * remaining vectors handle rx and tx interrupts.  Reuse the
		 * mailbox values for the next iteration.  The values we setup
		 * above are still useful for the single vectored mode.
		 */
		if (!i)
			continue;

		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	if (tg3_flag(tp, 5717_PLUS)) {
		/* Resume a low-power mode */
		tg3_frob_aux_power(tp, false);
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}

	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
		    tp->board_part_number,
		    tp->pci_chip_rev_id,
		    tg3_bus_string(tp, str),
		    dev->dev_addr);

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		struct phy_device *phydev;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		netdev_info(dev,
			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
			    phydev->drv->name, dev_name(&phydev->dev));
	} else {
		char *ethtype;

		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			ethtype = "10/100Base-TX";
		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			ethtype = "1000Base-SX";
		else
			ethtype = "10/100/1000Base-T";

		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
			    "(WireSpeed[%d], EEE[%d])\n",
			    tg3_phy_string(tp), ethtype,
			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
	}

	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
		    (dev->features & NETIF_F_RXCSUM) != 0,
		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
		    tg3_flag(tp, ENABLE_ASF) != 0,
		    tg3_flag(tp, TSO_CAPABLE) != 0);
	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
		    tp->dma_rwctrl,
		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

	/* Snapshot config space for later restore after AER slot reset. */
	pci_save_state(pdev);

	return 0;

	/* Error unwinding: labels release resources in reverse order of
	 * acquisition; earlier failures jump to later labels.
	 */
err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_power_down:
	pci_set_power_state(pdev, PCI_D3hot);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
15796
15797 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15798 {
15799         struct net_device *dev = pci_get_drvdata(pdev);
15800
15801         if (dev) {
15802                 struct tg3 *tp = netdev_priv(dev);
15803
15804                 if (tp->fw)
15805                         release_firmware(tp->fw);
15806
15807                 tg3_reset_task_cancel(tp);
15808
15809                 if (tg3_flag(tp, USE_PHYLIB)) {
15810                         tg3_phy_fini(tp);
15811                         tg3_mdio_fini(tp);
15812                 }
15813
15814                 unregister_netdev(dev);
15815                 if (tp->aperegs) {
15816                         iounmap(tp->aperegs);
15817                         tp->aperegs = NULL;
15818                 }
15819                 if (tp->regs) {
15820                         iounmap(tp->regs);
15821                         tp->regs = NULL;
15822                 }
15823                 free_netdev(dev);
15824                 pci_release_regions(pdev);
15825                 pci_disable_device(pdev);
15826                 pci_set_drvdata(pdev, NULL);
15827         }
15828 }
15829
15830 #ifdef CONFIG_PM_SLEEP
/* PM sleep callback: quiesce the interface and prepare the chip for
 * the low-power state.  If power-down preparation fails, the hardware
 * is restarted so the interface stays usable, and the original error
 * is still returned to the PM core.
 */
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Nothing to quiesce if the interface is down. */
	if (!netif_running(dev))
		return 0;

	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		/* Recovery path: bring the hardware and interface back up
		 * so a failed suspend does not leave the device dead.
		 */
		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		/* PHY restart must happen outside the full lock. */
		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
15884
/* PM resume callback: re-attach the interface and restart the
 * hardware, timer, and PHY if the device was running at suspend time.
 */
static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Interface was down at suspend; nothing to restart. */
	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	/* PHY restart must happen outside the full lock. */
	if (!err)
		tg3_phy_start(tp);

	return err;
}
15917
/* dev_pm_ops wired into tg3_driver.driver.pm below; NULL when
 * CONFIG_PM_SLEEP is disabled.
 */
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
15926
15927 /**
15928  * tg3_io_error_detected - called when PCI error is detected
15929  * @pdev: Pointer to PCI device
15930  * @state: The current pci connection state
15931  *
15932  * This function is called after a PCI bus error affecting
15933  * this device has been detected.
15934  */
15935 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15936                                               pci_channel_state_t state)
15937 {
15938         struct net_device *netdev = pci_get_drvdata(pdev);
15939         struct tg3 *tp = netdev_priv(netdev);
15940         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15941
15942         netdev_info(netdev, "PCI I/O error detected\n");
15943
15944         rtnl_lock();
15945
15946         if (!netif_running(netdev))
15947                 goto done;
15948
15949         tg3_phy_stop(tp);
15950
15951         tg3_netif_stop(tp);
15952
15953         del_timer_sync(&tp->timer);
15954
15955         /* Want to make sure that the reset task doesn't run */
15956         tg3_reset_task_cancel(tp);
15957         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15958
15959         netif_device_detach(netdev);
15960
15961         /* Clean up software state, even if MMIO is blocked */
15962         tg3_full_lock(tp, 0);
15963         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15964         tg3_full_unlock(tp);
15965
15966 done:
15967         if (state == pci_channel_io_perm_failure)
15968                 err = PCI_ERS_RESULT_DISCONNECT;
15969         else
15970                 pci_disable_device(pdev);
15971
15972         rtnl_unlock();
15973
15974         return err;
15975 }
15976
15977 /**
15978  * tg3_io_slot_reset - called after the pci bus has been reset.
15979  * @pdev: Pointer to PCI device
15980  *
15981  * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
15983  * followed by fixups by BIOS, and has its config space
15984  * set up identically to what it was at cold boot.
15985  */
15986 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15987 {
15988         struct net_device *netdev = pci_get_drvdata(pdev);
15989         struct tg3 *tp = netdev_priv(netdev);
15990         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15991         int err;
15992
15993         rtnl_lock();
15994
15995         if (pci_enable_device(pdev)) {
15996                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15997                 goto done;
15998         }
15999
16000         pci_set_master(pdev);
16001         pci_restore_state(pdev);
16002         pci_save_state(pdev);
16003
16004         if (!netif_running(netdev)) {
16005                 rc = PCI_ERS_RESULT_RECOVERED;
16006                 goto done;
16007         }
16008
16009         err = tg3_power_up(tp);
16010         if (err)
16011                 goto done;
16012
16013         rc = PCI_ERS_RESULT_RECOVERED;
16014
16015 done:
16016         rtnl_unlock();
16017
16018         return rc;
16019 }
16020
16021 /**
16022  * tg3_io_resume - called when traffic can start flowing again.
16023  * @pdev: Pointer to PCI device
16024  *
16025  * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
16027  */
16028 static void tg3_io_resume(struct pci_dev *pdev)
16029 {
16030         struct net_device *netdev = pci_get_drvdata(pdev);
16031         struct tg3 *tp = netdev_priv(netdev);
16032         int err;
16033
16034         rtnl_lock();
16035
16036         if (!netif_running(netdev))
16037                 goto done;
16038
16039         tg3_full_lock(tp, 0);
16040         tg3_flag_set(tp, INIT_COMPLETE);
16041         err = tg3_restart_hw(tp, 1);
16042         tg3_full_unlock(tp);
16043         if (err) {
16044                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
16045                 goto done;
16046         }
16047
16048         netif_device_attach(netdev);
16049
16050         tp->timer.expires = jiffies + tp->timer_offset;
16051         add_timer(&tp->timer);
16052
16053         tg3_netif_start(tp);
16054
16055         tg3_phy_start(tp);
16056
16057 done:
16058         rtnl_unlock();
16059 }
16060
/* PCI AER hooks: quiesce on error, re-enable after slot reset,
 * restart traffic on resume.
 */
static struct pci_error_handlers tg3_err_handler = {
	.error_detected = tg3_io_error_detected,
	.slot_reset     = tg3_io_slot_reset,
	.resume         = tg3_io_resume
};
16066
/* PCI driver descriptor tying the device id table, probe/remove,
 * AER handlers, and PM ops together.
 */
static struct pci_driver tg3_driver = {
	.name           = DRV_MODULE_NAME,
	.id_table       = tg3_pci_tbl,
	.probe          = tg3_init_one,
	.remove         = __devexit_p(tg3_remove_one),
	.err_handler    = &tg3_err_handler,
	.driver.pm      = TG3_PM_OPS,
};
16075
/* Module entry point: register the PCI driver. */
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}
16080
/* Module exit point: unregister the PCI driver. */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}
16085
/* Wire the init/exit routines into the module loader. */
module_init(tg3_init);
module_exit(tg3_cleanup);