/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
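
/* Illustrative usage: the flag name is token-pasted onto TG3_FLAG_, so
 * tg3_flag(tp, JUMBO_CAPABLE) tests TG3_FLAG_JUMBO_CAPABLE in
 * tp->tg3_flags.
 */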

#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     122
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "December 7, 2011"

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
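/* For example, with TG3_TX_RING_SIZE == 512 the compiler can reduce
 * (idx + 1) % TG3_TX_RING_SIZE to (idx + 1) & 511, which is exactly
 * what NEXT_TX() below spells out by hand.
 */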

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        { "nvram test        (online) " },
        { "link test         (online) " },
        { "register test     (offline)" },
        { "memory test       (offline)" },
        { "mac loopback test (offline)" },
        { "phy loopback test (offline)" },
        { "ext loopback test (offline)" },
        { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

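/* Indirect register access: the target offset is written to the
 * TG3PCI_REG_BASE_ADDR window in PCI config space, then the data is
 * transferred through TG3PCI_REG_DATA.  indirect_lock serializes the
 * two-step sequence.
 */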
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example, when the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another, when the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

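/* Accessor shorthands.  The _f ("flush") variants read the register
 * back so a posted write is guaranteed to have reached the chip;
 * tw32_wait_f() additionally enforces a delay (see _tw32_flush()).
 */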
#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)

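/* NIC SRAM is reached through a memory window: program the window
 * base, transfer one dword through TG3PCI_MEM_WIN_DATA, then restore
 * the base to zero.  Some configurations route the access through
 * PCI config space instead (SRAM_USE_CONFIG).
 */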
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver doesn't hold any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }

}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return 0;
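                /* else: fall through */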
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        default:
                return -EINVAL;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return;
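                /* else: fall through */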
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        default:
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int i;
        u32 apedata;

        /* NCSI does not support APE events */
        if (tg3_flag(tp, APE_HAS_NCSI))
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        for (i = 0; i < 10; i++) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                                        event | APE_EVENT_STATUS_EVENT_PENDING);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(100);
        }

        if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state.  Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        case RESET_KIND_SUSPEND:
                event = APE_EVENT_STATUS_STATE_SUSPEND;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }
        /* check for RX/TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

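/* Reprogram TG3PCI_CLOCK_CTRL, stepping through ALTCLK when core
 * clock frequencies change (hence the tw32_wait_f() calls); a no-op
 * on CPMU-present and 5780-class parts.
 */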
static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

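/* Clause 22 MDIO read through the MAC_MI_COM interface: pause
 * autopolling if enabled, issue the read command, then poll
 * MI_COM_BUSY for up to PHY_BUSY_LOOPS iterations.
 */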
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

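/* Clause 22 MDIO write; same MAC_MI_COM handshake as tg3_readphy().
 * Writes to MII_CTRL1000 and MII_TG3_AUX_CTRL are skipped (return
 * success) on FET-style PHYs.
 */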
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

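/* Indirect Clause 45 register access via the Clause 22 MMD access
 * registers: MII_TG3_MMD_CTRL selects the device/function, and
 * MII_TG3_MMD_ADDRESS carries first the address, then the data.
 */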
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

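/* AUX_CTRL fronts a small bank of shadow registers; the low bits of
 * each access select which shadow is addressed (reads go through the
 * MISC_RDSEL field, MISC writes need the WREN bit).
 */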
static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_TX_6DB);

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (tg3_readphy(tp, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (tg3_writephy(tp, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

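        /* On 5717-plus devices the PHY address follows the PCI
         * function number; SERDES devices sit seven addresses higher.
         */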
        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

                tp->phy_addr = tp->pci_fn + 1;

                if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
         * quickest way to bring the device back to an operational state.
         */
        if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
                tg3_bmcr_reset(tp);

        i = mdiobus_register(tp->mdio_bus);
        if (i) {
                dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
                mdiobus_free(tp->mdio_bus);
                return i;
        }

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        if (!phydev || !phydev->drv) {
                dev_warn(&tp->pdev->dev, "No PHY devices\n");
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
                return -ENODEV;
        }

        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM57780:
                phydev->interface = PHY_INTERFACE_MODE_GMII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                break;
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
                                     PHY_BRCM_RX_REFCLK_UNUSED |
                                     PHY_BRCM_DIS_TXCRXC_NOENRGY |
                                     PHY_BRCM_AUTO_PWRDWN_ENABLE;
                if (tg3_flag(tp, RGMII_INBAND_DISABLE))
                        phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
                /* fallthru */
        case PHY_ID_RTL8211C:
                phydev->interface = PHY_INTERFACE_MODE_RGMII;
                break;
        case PHY_ID_RTL8201E:
        case PHY_ID_BCMAC131:
                phydev->interface = PHY_INTERFACE_MODE_MII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                tp->phy_flags |= TG3_PHYFLG_IS_FET;
                break;
        }

        tg3_flag_set(tp, MDIOBUS_INITED);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);

        return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
        if (tg3_flag(tp, MDIOBUS_INITED)) {
                tg3_flag_clear(tp, MDIOBUS_INITED);
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
        }
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
        u32 val;

        val = tr32(GRC_RX_CPU_EVENT);
        val |= GRC_RX_CPU_DRIVER_EVENT;
        tw32_f(GRC_RX_CPU_EVENT, val);

        tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
        int i;
        unsigned int delay_cnt;
        long time_remain;

        /* If enough time has passed, no wait is necessary. */
        time_remain = (long)(tp->last_event_jiffies + 1 +
                      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
                      (long)jiffies;
        if (time_remain < 0)
                return;

        /* Check if we can shorten the wait time. */
        delay_cnt = jiffies_to_usecs(time_remain);
        if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
                delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
        delay_cnt = (delay_cnt >> 3) + 1;

        for (i = 0; i < delay_cnt; i++) {
                if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
                        break;
                udelay(8);
        }
}

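/* Report the current MII link state (BMCR/BMSR, ANAR/ANLPAR, 1000T
 * control/status and PHYADDR) to the ASF management firmware through
 * the NIC SRAM command mailbox.
 */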
1455 /* tp->lock is held. */
1456 static void tg3_ump_link_report(struct tg3 *tp)
1457 {
1458         u32 reg;
1459         u32 val;
1460
1461         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1462                 return;
1463
1464         tg3_wait_for_event_ack(tp);
1465
1466         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1467
1468         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1469
1470         val = 0;
1471         if (!tg3_readphy(tp, MII_BMCR, &reg))
1472                 val = reg << 16;
1473         if (!tg3_readphy(tp, MII_BMSR, &reg))
1474                 val |= (reg & 0xffff);
1475         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1476
1477         val = 0;
1478         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1479                 val = reg << 16;
1480         if (!tg3_readphy(tp, MII_LPA, &reg))
1481                 val |= (reg & 0xffff);
1482         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1483
1484         val = 0;
1485         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1486                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1487                         val = reg << 16;
1488                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1489                         val |= (reg & 0xffff);
1490         }
1491         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1492
1493         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1494                 val = reg << 16;
1495         else
1496                 val = 0;
1497         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1498
1499         tg3_generate_fw_event(tp);
1500 }
1501
1502 /* tp->lock is held. */
1503 static void tg3_stop_fw(struct tg3 *tp)
1504 {
1505         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1506                 /* Wait for RX CPU to ACK the previous event. */
1507                 tg3_wait_for_event_ack(tp);
1508
1509                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1510
1511                 tg3_generate_fw_event(tp);
1512
1513                 /* Wait for RX CPU to ACK this event. */
1514                 tg3_wait_for_event_ack(tp);
1515         }
1516 }
1517
1518 /* tp->lock is held. */
1519 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1520 {
1521         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1522                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1523
1524         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1525                 switch (kind) {
1526                 case RESET_KIND_INIT:
1527                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1528                                       DRV_STATE_START);
1529                         break;
1530
1531                 case RESET_KIND_SHUTDOWN:
1532                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1533                                       DRV_STATE_UNLOAD);
1534                         break;
1535
1536                 case RESET_KIND_SUSPEND:
1537                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1538                                       DRV_STATE_SUSPEND);
1539                         break;
1540
1541                 default:
1542                         break;
1543                 }
1544         }
1545
1546         if (kind == RESET_KIND_INIT ||
1547             kind == RESET_KIND_SUSPEND)
1548                 tg3_ape_driver_state_change(tp, kind);
1549 }
1550
1551 /* tp->lock is held. */
1552 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1553 {
1554         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1555                 switch (kind) {
1556                 case RESET_KIND_INIT:
1557                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1558                                       DRV_STATE_START_DONE);
1559                         break;
1560
1561                 case RESET_KIND_SHUTDOWN:
1562                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1563                                       DRV_STATE_UNLOAD_DONE);
1564                         break;
1565
1566                 default:
1567                         break;
1568                 }
1569         }
1570
1571         if (kind == RESET_KIND_SHUTDOWN)
1572                 tg3_ape_driver_state_change(tp, kind);
1573 }
1574
1575 /* tp->lock is held. */
1576 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1577 {
1578         if (tg3_flag(tp, ENABLE_ASF)) {
1579                 switch (kind) {
1580                 case RESET_KIND_INIT:
1581                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1582                                       DRV_STATE_START);
1583                         break;
1584
1585                 case RESET_KIND_SHUTDOWN:
1586                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1587                                       DRV_STATE_UNLOAD);
1588                         break;
1589
1590                 case RESET_KIND_SUSPEND:
1591                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1592                                       DRV_STATE_SUSPEND);
1593                         break;
1594
1595                 default:
1596                         break;
1597                 }
1598         }
1599 }
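
/*
 * Usage sketch: the three signature writers above are paired around a
 * chip reset elsewhere in this file, roughly
 *
 *	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
 *	... halt, reset and reprogram the chip ...
 *	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
 *
 * so that ASF/APE management firmware can distinguish a
 * driver-initiated reset from a crash and keep its own state machine
 * in sync.
 */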
1600
1601 static int tg3_poll_fw(struct tg3 *tp)
1602 {
1603         int i;
1604         u32 val;
1605
1606         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1607                 /* Wait up to 20ms for init done. */
1608                 for (i = 0; i < 200; i++) {
1609                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1610                                 return 0;
1611                         udelay(100);
1612                 }
1613                 return -ENODEV;
1614         }
1615
1616         /* Wait for firmware initialization to complete. */
1617         for (i = 0; i < 100000; i++) {
1618                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1619                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1620                         break;
1621                 udelay(10);
1622         }
1623
1624         /* Chip might not be fitted with firmware.  Some Sun onboard
1625          * parts are configured like that.  So don't signal the timeout
1626          * of the above loop as an error, but do report the lack of
1627          * running firmware once.
1628          */
1629         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1630                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1631
1632                 netdev_info(tp->dev, "No firmware running\n");
1633         }
1634
1635         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1636                 /* The 57765 A0 needs a little more
1637                  * time to complete its internal initialization.
1638                  */
1639                 mdelay(10);
1640         }
1641
1642         return 0;
1643 }
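
/*
 * Handshake note: the bootcode acknowledges completion by writing the
 * one's complement of the magic value back into the mailbox, so the
 * polling loop above is effectively waiting for
 *
 *	tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
 *	val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1
 *
 * which is why a device with no running firmware simply times out
 * and is reported once rather than treated as a fatal error.
 */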
1644
1645 static void tg3_link_report(struct tg3 *tp)
1646 {
1647         if (!netif_carrier_ok(tp->dev)) {
1648                 netif_info(tp, link, tp->dev, "Link is down\n");
1649                 tg3_ump_link_report(tp);
1650         } else if (netif_msg_link(tp)) {
1651                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1652                             (tp->link_config.active_speed == SPEED_1000 ?
1653                              1000 :
1654                              (tp->link_config.active_speed == SPEED_100 ?
1655                               100 : 10)),
1656                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1657                              "full" : "half"));
1658
1659                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1660                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1661                             "on" : "off",
1662                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1663                             "on" : "off");
1664
1665                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1666                         netdev_info(tp->dev, "EEE is %s\n",
1667                                     tp->setlpicnt ? "enabled" : "disabled");
1668
1669                 tg3_ump_link_report(tp);
1670         }
1671 }
1672
1673 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1674 {
1675         u16 miireg;
1676
1677         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1678                 miireg = ADVERTISE_1000XPAUSE;
1679         else if (flow_ctrl & FLOW_CTRL_TX)
1680                 miireg = ADVERTISE_1000XPSE_ASYM;
1681         else if (flow_ctrl & FLOW_CTRL_RX)
1682                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1683         else
1684                 miireg = 0;
1685
1686         return miireg;
1687 }
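
/*
 * For reference, the function above encodes the requested flow-control
 * mode into the 1000BASE-X pause advertisement bits as follows:
 *
 *	FLOW_CTRL_TX | FLOW_CTRL_RX  ->  ADVERTISE_1000XPAUSE
 *	FLOW_CTRL_TX only            ->  ADVERTISE_1000XPSE_ASYM
 *	FLOW_CTRL_RX only            ->  ADVERTISE_1000XPAUSE |
 *	                                 ADVERTISE_1000XPSE_ASYM
 *	neither                      ->  0
 */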
1688
1689 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1690 {
1691         u8 cap = 0;
1692
1693         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1694                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1695         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1696                 if (lcladv & ADVERTISE_1000XPAUSE)
1697                         cap = FLOW_CTRL_RX;
1698                 if (rmtadv & ADVERTISE_1000XPAUSE)
1699                         cap = FLOW_CTRL_TX;
1700         }
1701
1702         return cap;
1703 }
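
/*
 * Worked example: if the local side advertises PAUSE | ASYM (the
 * RX-only encoding from the table above) while the link partner
 * advertises only ASYM (TX-only), the first branch fails but the
 * second matches, and since lcladv has PAUSE set the result is
 * FLOW_CTRL_RX: this MAC will honor pause frames from the partner
 * but will not send any of its own.
 */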
1704
1705 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1706 {
1707         u8 autoneg;
1708         u8 flowctrl = 0;
1709         u32 old_rx_mode = tp->rx_mode;
1710         u32 old_tx_mode = tp->tx_mode;
1711
1712         if (tg3_flag(tp, USE_PHYLIB))
1713                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1714         else
1715                 autoneg = tp->link_config.autoneg;
1716
1717         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1718                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1719                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1720                 else
1721                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1722         } else
1723                 flowctrl = tp->link_config.flowctrl;
1724
1725         tp->link_config.active_flowctrl = flowctrl;
1726
1727         if (flowctrl & FLOW_CTRL_RX)
1728                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1729         else
1730                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1731
1732         if (old_rx_mode != tp->rx_mode)
1733                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1734
1735         if (flowctrl & FLOW_CTRL_TX)
1736                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1737         else
1738                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1739
1740         if (old_tx_mode != tp->tx_mode)
1741                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1742 }
1743
1744 static void tg3_adjust_link(struct net_device *dev)
1745 {
1746         u8 oldflowctrl, linkmesg = 0;
1747         u32 mac_mode, lcl_adv, rmt_adv;
1748         struct tg3 *tp = netdev_priv(dev);
1749         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1750
1751         spin_lock_bh(&tp->lock);
1752
1753         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1754                                     MAC_MODE_HALF_DUPLEX);
1755
1756         oldflowctrl = tp->link_config.active_flowctrl;
1757
1758         if (phydev->link) {
1759                 lcl_adv = 0;
1760                 rmt_adv = 0;
1761
1762                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1763                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1764                 else if (phydev->speed == SPEED_1000 ||
1765                          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1766                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1767                 else
1768                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1769
1770                 if (phydev->duplex == DUPLEX_HALF)
1771                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1772                 else {
1773                         lcl_adv = mii_advertise_flowctrl(
1774                                   tp->link_config.flowctrl);
1775
1776                         if (phydev->pause)
1777                                 rmt_adv = LPA_PAUSE_CAP;
1778                         if (phydev->asym_pause)
1779                                 rmt_adv |= LPA_PAUSE_ASYM;
1780                 }
1781
1782                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1783         } else
1784                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1785
1786         if (mac_mode != tp->mac_mode) {
1787                 tp->mac_mode = mac_mode;
1788                 tw32_f(MAC_MODE, tp->mac_mode);
1789                 udelay(40);
1790         }
1791
1792         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1793                 if (phydev->speed == SPEED_10)
1794                         tw32(MAC_MI_STAT,
1795                              MAC_MI_STAT_10MBPS_MODE |
1796                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1797                 else
1798                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1799         }
1800
1801         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1802                 tw32(MAC_TX_LENGTHS,
1803                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1804                       (6 << TX_LENGTHS_IPG_SHIFT) |
1805                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1806         else
1807                 tw32(MAC_TX_LENGTHS,
1808                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1809                       (6 << TX_LENGTHS_IPG_SHIFT) |
1810                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1811
1812         if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1813             (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1814             phydev->speed != tp->link_config.active_speed ||
1815             phydev->duplex != tp->link_config.active_duplex ||
1816             oldflowctrl != tp->link_config.active_flowctrl)
1817                 linkmesg = 1;
1818
1819         tp->link_config.active_speed = phydev->speed;
1820         tp->link_config.active_duplex = phydev->duplex;
1821
1822         spin_unlock_bh(&tp->lock);
1823
1824         if (linkmesg)
1825                 tg3_link_report(tp);
1826 }
1827
1828 static int tg3_phy_init(struct tg3 *tp)
1829 {
1830         struct phy_device *phydev;
1831
1832         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1833                 return 0;
1834
1835         /* Bring the PHY back to a known state. */
1836         tg3_bmcr_reset(tp);
1837
1838         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1839
1840         /* Attach the MAC to the PHY. */
1841         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1842                              phydev->dev_flags, phydev->interface);
1843         if (IS_ERR(phydev)) {
1844                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1845                 return PTR_ERR(phydev);
1846         }
1847
1848         /* Mask with MAC supported features. */
1849         switch (phydev->interface) {
1850         case PHY_INTERFACE_MODE_GMII:
1851         case PHY_INTERFACE_MODE_RGMII:
1852                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1853                         phydev->supported &= (PHY_GBIT_FEATURES |
1854                                               SUPPORTED_Pause |
1855                                               SUPPORTED_Asym_Pause);
1856                         break;
1857                 }
1858                 /* fall through */
1859         case PHY_INTERFACE_MODE_MII:
1860                 phydev->supported &= (PHY_BASIC_FEATURES |
1861                                       SUPPORTED_Pause |
1862                                       SUPPORTED_Asym_Pause);
1863                 break;
1864         default:
1865                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1866                 return -EINVAL;
1867         }
1868
1869         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1870
1871         phydev->advertising = phydev->supported;
1872
1873         return 0;
1874 }
1875
1876 static void tg3_phy_start(struct tg3 *tp)
1877 {
1878         struct phy_device *phydev;
1879
1880         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1881                 return;
1882
1883         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1884
1885         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1886                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1887                 phydev->speed = tp->link_config.orig_speed;
1888                 phydev->duplex = tp->link_config.orig_duplex;
1889                 phydev->autoneg = tp->link_config.orig_autoneg;
1890                 phydev->advertising = tp->link_config.orig_advertising;
1891         }
1892
1893         phy_start(phydev);
1894
1895         phy_start_aneg(phydev);
1896 }
1897
1898 static void tg3_phy_stop(struct tg3 *tp)
1899 {
1900         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1901                 return;
1902
1903         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1904 }
1905
1906 static void tg3_phy_fini(struct tg3 *tp)
1907 {
1908         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1909                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1910                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1911         }
1912 }
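
/*
 * Lifecycle sketch: the four phylib wrappers above are used in
 * matched pairs, roughly
 *
 *	tg3_phy_init(tp);	connect once, e.g. in the open path
 *	tg3_phy_start(tp);	restore saved state, kick autoneg
 *	...
 *	tg3_phy_stop(tp);	quiesce the phylib state machine
 *	tg3_phy_fini(tp);	disconnect on teardown
 *
 * Each checks TG3_PHYFLG_IS_CONNECTED so that out-of-order calls
 * degrade to harmless no-ops.
 */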
1913
1914 static int tg3_phy_set_extloopbk(struct tg3 *tp)
1915 {
1916         int err;
1917         u32 val;
1918
1919         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
1920                 return 0;
1921
1922         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1923                 /* Cannot do read-modify-write on 5401 */
1924                 err = tg3_phy_auxctl_write(tp,
1925                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1926                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
1927                                            0x4c20);
1928                 goto done;
1929         }
1930
1931         err = tg3_phy_auxctl_read(tp,
1932                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1933         if (err)
1934                 return err;
1935
1936         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
1937         err = tg3_phy_auxctl_write(tp,
1938                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
1939
1940 done:
1941         return err;
1942 }
1943
1944 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1945 {
1946         u32 phytest;
1947
1948         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1949                 u32 phy;
1950
1951                 tg3_writephy(tp, MII_TG3_FET_TEST,
1952                              phytest | MII_TG3_FET_SHADOW_EN);
1953                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1954                         if (enable)
1955                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1956                         else
1957                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1958                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1959                 }
1960                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1961         }
1962 }
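
/*
 * The shadow-register bracket used above recurs for every FET PHY
 * access in this file; the distilled shape is
 *
 *	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &orig)) {
 *		tg3_writephy(tp, MII_TG3_FET_TEST,
 *			     orig | MII_TG3_FET_SHADOW_EN);
 *		... read-modify-write the shadow register ...
 *		tg3_writephy(tp, MII_TG3_FET_TEST, orig);
 *	}
 *
 * i.e. expose the shadow bank, touch the hidden register, then
 * restore the TEST register so the normal registers reappear.
 */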
1963
1964 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1965 {
1966         u32 reg;
1967
1968         if (!tg3_flag(tp, 5705_PLUS) ||
1969             (tg3_flag(tp, 5717_PLUS) &&
1970              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1971                 return;
1972
1973         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1974                 tg3_phy_fet_toggle_apd(tp, enable);
1975                 return;
1976         }
1977
1978         reg = MII_TG3_MISC_SHDW_WREN |
1979               MII_TG3_MISC_SHDW_SCR5_SEL |
1980               MII_TG3_MISC_SHDW_SCR5_LPED |
1981               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1982               MII_TG3_MISC_SHDW_SCR5_SDTL |
1983               MII_TG3_MISC_SHDW_SCR5_C125OE;
1984         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1985                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1986
1987         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1988
1989
1990         reg = MII_TG3_MISC_SHDW_WREN |
1991               MII_TG3_MISC_SHDW_APD_SEL |
1992               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1993         if (enable)
1994                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1995
1996         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1997 }
1998
1999 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2000 {
2001         u32 phy;
2002
2003         if (!tg3_flag(tp, 5705_PLUS) ||
2004             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2005                 return;
2006
2007         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2008                 u32 ephy;
2009
2010                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2011                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2012
2013                         tg3_writephy(tp, MII_TG3_FET_TEST,
2014                                      ephy | MII_TG3_FET_SHADOW_EN);
2015                         if (!tg3_readphy(tp, reg, &phy)) {
2016                                 if (enable)
2017                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2018                                 else
2019                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2020                                 tg3_writephy(tp, reg, phy);
2021                         }
2022                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2023                 }
2024         } else {
2025                 int ret;
2026
2027                 ret = tg3_phy_auxctl_read(tp,
2028                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2029                 if (!ret) {
2030                         if (enable)
2031                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2032                         else
2033                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2034                         tg3_phy_auxctl_write(tp,
2035                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2036                 }
2037         }
2038 }
2039
2040 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2041 {
2042         int ret;
2043         u32 val;
2044
2045         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2046                 return;
2047
2048         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2049         if (!ret)
2050                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2051                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2052 }
2053
2054 static void tg3_phy_apply_otp(struct tg3 *tp)
2055 {
2056         u32 otp, phy;
2057
2058         if (!tp->phy_otp)
2059                 return;
2060
2061         otp = tp->phy_otp;
2062
2063         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2064                 return;
2065
2066         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2067         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2068         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2069
2070         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2071               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2072         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2073
2074         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2075         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2076         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2077
2078         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2079         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2080
2081         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2082         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2083
2084         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2085               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2086         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2087
2088         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2089 }
2090
2091 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2092 {
2093         u32 val;
2094
2095         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2096                 return;
2097
2098         tp->setlpicnt = 0;
2099
2100         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2101             current_link_up == 1 &&
2102             tp->link_config.active_duplex == DUPLEX_FULL &&
2103             (tp->link_config.active_speed == SPEED_100 ||
2104              tp->link_config.active_speed == SPEED_1000)) {
2105                 u32 eeectl;
2106
2107                 if (tp->link_config.active_speed == SPEED_1000)
2108                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2109                 else
2110                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2111
2112                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2113
2114                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2115                                   TG3_CL45_D7_EEERES_STAT, &val);
2116
2117                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2118                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2119                         tp->setlpicnt = 2;
2120         }
2121
2122         if (!tp->setlpicnt) {
2123                 if (current_link_up == 1 &&
2124                    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2125                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2126                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2127                 }
2128
2129                 val = tr32(TG3_CPMU_EEE_MODE);
2130                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2131         }
2132 }
2133
2134 static void tg3_phy_eee_enable(struct tg3 *tp)
2135 {
2136         u32 val;
2137
2138         if (tp->link_config.active_speed == SPEED_1000 &&
2139             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2140              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2141              tg3_flag(tp, 57765_CLASS)) &&
2142             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2143                 val = MII_TG3_DSP_TAP26_ALNOKO |
2144                       MII_TG3_DSP_TAP26_RMRXSTO;
2145                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2146                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2147         }
2148
2149         val = tr32(TG3_CPMU_EEE_MODE);
2150         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2151 }
2152
2153 static int tg3_wait_macro_done(struct tg3 *tp)
2154 {
2155         int limit = 100;
2156
2157         while (limit--) {
2158                 u32 tmp32;
2159
2160                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2161                         if ((tmp32 & 0x1000) == 0)
2162                                 break;
2163                 }
2164         }
2165         if (limit < 0)
2166                 return -EBUSY;
2167
2168         return 0;
2169 }
2170
2171 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2172 {
2173         static const u32 test_pat[4][6] = {
2174         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2175         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2176         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2177         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2178         };
2179         int chan;
2180
2181         for (chan = 0; chan < 4; chan++) {
2182                 int i;
2183
2184                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2185                              (chan * 0x2000) | 0x0200);
2186                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2187
2188                 for (i = 0; i < 6; i++)
2189                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2190                                      test_pat[chan][i]);
2191
2192                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2193                 if (tg3_wait_macro_done(tp)) {
2194                         *resetp = 1;
2195                         return -EBUSY;
2196                 }
2197
2198                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2199                              (chan * 0x2000) | 0x0200);
2200                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2201                 if (tg3_wait_macro_done(tp)) {
2202                         *resetp = 1;
2203                         return -EBUSY;
2204                 }
2205
2206                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2207                 if (tg3_wait_macro_done(tp)) {
2208                         *resetp = 1;
2209                         return -EBUSY;
2210                 }
2211
2212                 for (i = 0; i < 6; i += 2) {
2213                         u32 low, high;
2214
2215                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2216                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2217                             tg3_wait_macro_done(tp)) {
2218                                 *resetp = 1;
2219                                 return -EBUSY;
2220                         }
2221                         low &= 0x7fff;
2222                         high &= 0x000f;
2223                         if (low != test_pat[chan][i] ||
2224                             high != test_pat[chan][i+1]) {
2225                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2226                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2227                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2228
2229                                 return -EBUSY;
2230                         }
2231                 }
2232         }
2233
2234         return 0;
2235 }
2236
2237 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2238 {
2239         int chan;
2240
2241         for (chan = 0; chan < 4; chan++) {
2242                 int i;
2243
2244                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2245                              (chan * 0x2000) | 0x0200);
2246                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2247                 for (i = 0; i < 6; i++)
2248                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2249                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2250                 if (tg3_wait_macro_done(tp))
2251                         return -EBUSY;
2252         }
2253
2254         return 0;
2255 }
2256
2257 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2258 {
2259         u32 reg32, phy9_orig;
2260         int retries, do_phy_reset, err;
2261
2262         retries = 10;
2263         do_phy_reset = 1;
2264         do {
2265                 if (do_phy_reset) {
2266                         err = tg3_bmcr_reset(tp);
2267                         if (err)
2268                                 return err;
2269                         do_phy_reset = 0;
2270                 }
2271
2272                 /* Disable transmitter and interrupt.  */
2273                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2274                         continue;
2275
2276                 reg32 |= 0x3000;
2277                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2278
2279                 /* Set full-duplex, 1000 Mbps.  */
2280                 tg3_writephy(tp, MII_BMCR,
2281                              BMCR_FULLDPLX | BMCR_SPEED1000);
2282
2283                 /* Set to master mode.  */
2284                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2285                         continue;
2286
2287                 tg3_writephy(tp, MII_CTRL1000,
2288                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2289
2290                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2291                 if (err)
2292                         return err;
2293
2294                 /* Block the PHY control access.  */
2295                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2296
2297                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2298                 if (!err)
2299                         break;
2300         } while (--retries);
2301
2302         err = tg3_phy_reset_chanpat(tp);
2303         if (err)
2304                 return err;
2305
2306         tg3_phydsp_write(tp, 0x8005, 0x0000);
2307
2308         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2309         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2310
2311         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2312
2313         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2314
2315         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2316                 reg32 &= ~0x3000;
2317                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2318         } else if (!err)
2319                 err = -EBUSY;
2320
2321         return err;
2322 }
2323
2324 /* Reset the tigon3 PHY and reapply the chip-specific PHY
2325  * workarounds and DSP fixups afterwards.
2326  */
2327 static int tg3_phy_reset(struct tg3 *tp)
2328 {
2329         u32 val, cpmuctrl;
2330         int err;
2331
2332         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2333                 val = tr32(GRC_MISC_CFG);
2334                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2335                 udelay(40);
2336         }
2337         err  = tg3_readphy(tp, MII_BMSR, &val);
2338         err |= tg3_readphy(tp, MII_BMSR, &val);
2339         if (err != 0)
2340                 return -EBUSY;
2341
2342         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2343                 netif_carrier_off(tp->dev);
2344                 tg3_link_report(tp);
2345         }
2346
2347         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2348             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2349             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2350                 err = tg3_phy_reset_5703_4_5(tp);
2351                 if (err)
2352                         return err;
2353                 goto out;
2354         }
2355
2356         cpmuctrl = 0;
2357         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2358             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2359                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2360                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2361                         tw32(TG3_CPMU_CTRL,
2362                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2363         }
2364
2365         err = tg3_bmcr_reset(tp);
2366         if (err)
2367                 return err;
2368
2369         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2370                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2371                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2372
2373                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2374         }
2375
2376         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2377             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2378                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2379                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2380                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2381                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2382                         udelay(40);
2383                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2384                 }
2385         }
2386
2387         if (tg3_flag(tp, 5717_PLUS) &&
2388             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2389                 return 0;
2390
2391         tg3_phy_apply_otp(tp);
2392
2393         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2394                 tg3_phy_toggle_apd(tp, true);
2395         else
2396                 tg3_phy_toggle_apd(tp, false);
2397
2398 out:
2399         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2400             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2401                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2402                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2403                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2404         }
2405
2406         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2407                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2408                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2409         }
2410
2411         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2412                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2413                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2414                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2415                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2416                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2417                 }
2418         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2419                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2420                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2421                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2422                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2423                                 tg3_writephy(tp, MII_TG3_TEST1,
2424                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2425                         } else
2426                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2427
2428                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2429                 }
2430         }
2431
2432         /* Set Extended packet length bit (bit 14) on all chips
2433          * that support jumbo frames. */
2434         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2435                 /* Cannot do read-modify-write on 5401 */
2436                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2437         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2438                 /* Set bit 14 with read-modify-write to preserve other bits */
2439                 err = tg3_phy_auxctl_read(tp,
2440                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2441                 if (!err)
2442                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2443                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2444         }
2445
2446         /* Set PHY register 0x10 bit 0 to high FIFO elasticity to
2447          * support jumbo frame transmission.
2448          */
2449         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2450                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2451                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2452                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2453         }
2454
2455         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2456                 /* adjust output voltage */
2457                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2458         }
2459
2460         tg3_phy_toggle_automdix(tp, 1);
2461         tg3_phy_set_wirespeed(tp);
2462         return 0;
2463 }
2464
2465 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2466 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2467 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2468                                           TG3_GPIO_MSG_NEED_VAUX)
2469 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2470         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2471          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2472          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2473          (TG3_GPIO_MSG_DRVR_PRES << 12))
2474
2475 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2476         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2477          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2478          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2479          (TG3_GPIO_MSG_NEED_VAUX << 12))
2480
2481 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2482 {
2483         u32 status, shift;
2484
2485         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2486             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2487                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2488         else
2489                 status = tr32(TG3_CPMU_DRV_STATUS);
2490
2491         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2492         status &= ~(TG3_GPIO_MSG_MASK << shift);
2493         status |= (newstat << shift);
2494
2495         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2496             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2497                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2498         else
2499                 tw32(TG3_CPMU_DRV_STATUS, status);
2500
2501         return status >> TG3_APE_GPIO_MSG_SHIFT;
2502 }
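
/*
 * Worked example: each PCI function owns a 4-bit slot (two bits
 * currently defined) in the shared status word.  For tp->pci_fn == 2,
 *
 *	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * 2;
 *
 * so function 2's DRVR_PRES/NEED_VAUX bits sit 8 bits above function
 * 0's, and the value returned (shifted back down) lets callers test
 * all four slots at once with the _ALL_ masks defined above.
 */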
2503
2504 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2505 {
2506         if (!tg3_flag(tp, IS_NIC))
2507                 return 0;
2508
2509         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2510             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2511             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2512                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2513                         return -EIO;
2514
2515                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2516
2517                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2518                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2519
2520                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2521         } else {
2522                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2523                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2524         }
2525
2526         return 0;
2527 }
2528
2529 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2530 {
2531         u32 grc_local_ctrl;
2532
2533         if (!tg3_flag(tp, IS_NIC) ||
2534             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2535             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2536                 return;
2537
2538         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2539
2540         tw32_wait_f(GRC_LOCAL_CTRL,
2541                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2542                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2543
2544         tw32_wait_f(GRC_LOCAL_CTRL,
2545                     grc_local_ctrl,
2546                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2547
2548         tw32_wait_f(GRC_LOCAL_CTRL,
2549                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2550                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2551 }
2552
2553 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2554 {
2555         if (!tg3_flag(tp, IS_NIC))
2556                 return;
2557
2558         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2559             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2560                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2561                             (GRC_LCLCTRL_GPIO_OE0 |
2562                              GRC_LCLCTRL_GPIO_OE1 |
2563                              GRC_LCLCTRL_GPIO_OE2 |
2564                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2565                              GRC_LCLCTRL_GPIO_OUTPUT1),
2566                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2567         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2568                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2569                 /* The 5761 non-E device swaps GPIO 0 and GPIO 2. */
2570                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2571                                      GRC_LCLCTRL_GPIO_OE1 |
2572                                      GRC_LCLCTRL_GPIO_OE2 |
2573                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2574                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2575                                      tp->grc_local_ctrl;
2576                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2577                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2578
2579                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2580                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2581                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2582
2583                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2584                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2585                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2586         } else {
2587                 u32 no_gpio2;
2588                 u32 grc_local_ctrl = 0;
2589
2590                 /* Workaround to keep the board from drawing too much current. */
2591                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2592                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2593                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2594                                     grc_local_ctrl,
2595                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2596                 }
2597
2598                 /* On 5753 and variants, GPIO2 cannot be used. */
2599                 no_gpio2 = tp->nic_sram_data_cfg &
2600                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2601
2602                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2603                                   GRC_LCLCTRL_GPIO_OE1 |
2604                                   GRC_LCLCTRL_GPIO_OE2 |
2605                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2606                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2607                 if (no_gpio2) {
2608                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2609                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2610                 }
2611                 tw32_wait_f(GRC_LOCAL_CTRL,
2612                             tp->grc_local_ctrl | grc_local_ctrl,
2613                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2614
2615                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2616
2617                 tw32_wait_f(GRC_LOCAL_CTRL,
2618                             tp->grc_local_ctrl | grc_local_ctrl,
2619                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2620
2621                 if (!no_gpio2) {
2622                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2623                         tw32_wait_f(GRC_LOCAL_CTRL,
2624                                     tp->grc_local_ctrl | grc_local_ctrl,
2625                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2626                 }
2627         }
2628 }
2629
2630 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2631 {
2632         u32 msg = 0;
2633
2634         /* Serialize power state transitions */
2635         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2636                 return;
2637
2638         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2639                 msg = TG3_GPIO_MSG_NEED_VAUX;
2640
2641         msg = tg3_set_function_status(tp, msg);
2642
2643         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2644                 goto done;
2645
2646         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2647                 tg3_pwrsrc_switch_to_vaux(tp);
2648         else
2649                 tg3_pwrsrc_die_with_vmain(tp);
2650
2651 done:
2652         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2653 }
2654
2655 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2656 {
2657         bool need_vaux = false;
2658
2659         /* The GPIOs do something completely different on 57765. */
2660         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2661                 return;
2662
2663         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2664             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2665             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2666                 tg3_frob_aux_power_5717(tp, include_wol ?
2667                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2668                 return;
2669         }
2670
2671         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2672                 struct net_device *dev_peer;
2673
2674                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2675
2676                 /* remove_one() may have been run on the peer. */
2677                 if (dev_peer) {
2678                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2679
2680                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2681                                 return;
2682
2683                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2684                             tg3_flag(tp_peer, ENABLE_ASF))
2685                                 need_vaux = true;
2686                 }
2687         }
2688
2689         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2690             tg3_flag(tp, ENABLE_ASF))
2691                 need_vaux = true;
2692
2693         if (need_vaux)
2694                 tg3_pwrsrc_switch_to_vaux(tp);
2695         else
2696                 tg3_pwrsrc_die_with_vmain(tp);
2697 }
2698
2699 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2700 {
2701         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2702                 return 1;
2703         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2704                 if (speed != SPEED_10)
2705                         return 1;
2706         } else if (speed == SPEED_10)
2707                 return 1;
2708
2709         return 0;
2710 }
2711
2712 static int tg3_setup_phy(struct tg3 *, int);
2713 static int tg3_halt_cpu(struct tg3 *, u32);
2714
2715 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2716 {
2717         u32 val;
2718
2719         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2720                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2721                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2722                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2723
2724                         sg_dig_ctrl |=
2725                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2726                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2727                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2728                 }
2729                 return;
2730         }
2731
2732         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2733                 tg3_bmcr_reset(tp);
2734                 val = tr32(GRC_MISC_CFG);
2735                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2736                 udelay(40);
2737                 return;
2738         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2739                 u32 phytest;
2740                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2741                         u32 phy;
2742
2743                         tg3_writephy(tp, MII_ADVERTISE, 0);
2744                         tg3_writephy(tp, MII_BMCR,
2745                                      BMCR_ANENABLE | BMCR_ANRESTART);
2746
2747                         tg3_writephy(tp, MII_TG3_FET_TEST,
2748                                      phytest | MII_TG3_FET_SHADOW_EN);
2749                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2750                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2751                                 tg3_writephy(tp,
2752                                              MII_TG3_FET_SHDW_AUXMODE4,
2753                                              phy);
2754                         }
2755                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2756                 }
2757                 return;
2758         } else if (do_low_power) {
2759                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2760                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2761
2762                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2763                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2764                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2765                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2766         }
2767
2768         /* The PHY should not be powered down on some chips because
2769          * doing so triggers hardware bugs on those parts.
2770          */
2771         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2772             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2773             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2774              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2775                 return;
2776
2777         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2778             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2779                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2780                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2781                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2782                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2783         }
2784
2785         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2786 }
2787
2788 /* tp->lock is held. */
2789 static int tg3_nvram_lock(struct tg3 *tp)
2790 {
2791         if (tg3_flag(tp, NVRAM)) {
2792                 int i;
2793
2794                 if (tp->nvram_lock_cnt == 0) {
2795                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2796                         for (i = 0; i < 8000; i++) {
2797                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2798                                         break;
2799                                 udelay(20);
2800                         }
2801                         if (i == 8000) {
2802                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2803                                 return -ENODEV;
2804                         }
2805                 }
2806                 tp->nvram_lock_cnt++;
2807         }
2808         return 0;
2809 }
2810
2811 /* tp->lock is held. */
2812 static void tg3_nvram_unlock(struct tg3 *tp)
2813 {
2814         if (tg3_flag(tp, NVRAM)) {
2815                 if (tp->nvram_lock_cnt > 0)
2816                         tp->nvram_lock_cnt--;
2817                 if (tp->nvram_lock_cnt == 0)
2818                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2819         }
2820 }
2821
2822 /* tp->lock is held. */
2823 static void tg3_enable_nvram_access(struct tg3 *tp)
2824 {
2825         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2826                 u32 nvaccess = tr32(NVRAM_ACCESS);
2827
2828                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2829         }
2830 }
2831
2832 /* tp->lock is held. */
2833 static void tg3_disable_nvram_access(struct tg3 *tp)
2834 {
2835         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2836                 u32 nvaccess = tr32(NVRAM_ACCESS);
2837
2838                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2839         }
2840 }
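
/*
 * Access pattern sketch: NVRAM users in this file bracket the actual
 * transfer as follows, with tp->lock held throughout:
 *
 *	ret = tg3_nvram_lock(tp);
 *	if (ret)
 *		return ret;
 *	tg3_enable_nvram_access(tp);
 *	... NVRAM_ADDR / NVRAM_CMD transfers ...
 *	tg3_disable_nvram_access(tp);
 *	tg3_nvram_unlock(tp);
 *
 * tg3_nvram_read() below follows exactly this shape.
 */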
2841
2842 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2843                                         u32 offset, u32 *val)
2844 {
2845         u32 tmp;
2846         int i;
2847
2848         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2849                 return -EINVAL;
2850
2851         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2852                                         EEPROM_ADDR_DEVID_MASK |
2853                                         EEPROM_ADDR_READ);
2854         tw32(GRC_EEPROM_ADDR,
2855              tmp |
2856              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2857              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2858               EEPROM_ADDR_ADDR_MASK) |
2859              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2860
2861         for (i = 0; i < 1000; i++) {
2862                 tmp = tr32(GRC_EEPROM_ADDR);
2863
2864                 if (tmp & EEPROM_ADDR_COMPLETE)
2865                         break;
2866                 msleep(1);
2867         }
2868         if (!(tmp & EEPROM_ADDR_COMPLETE))
2869                 return -EBUSY;
2870
2871         tmp = tr32(GRC_EEPROM_DATA);
2872
2873         /*
2874          * The data will always be opposite the native endian
2875          * format.  Perform a blind byteswap to compensate.
2876          */
2877         *val = swab32(tmp);
2878
2879         return 0;
2880 }
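
/*
 * Byteswap example: if GRC_EEPROM_DATA reads back as 0x11223344,
 * swab32() stores 0x44332211 in *val.  Because the hardware delivers
 * the word opposite to native endianness, this blind swap leaves
 * *val in the byte order the caller expects on both BE and LE hosts.
 */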
2881
2882 #define NVRAM_CMD_TIMEOUT 10000
2883
2884 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2885 {
2886         int i;
2887
2888         tw32(NVRAM_CMD, nvram_cmd);
2889         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2890                 udelay(10);
2891                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2892                         udelay(10);
2893                         break;
2894                 }
2895         }
2896
2897         if (i == NVRAM_CMD_TIMEOUT)
2898                 return -EBUSY;
2899
2900         return 0;
2901 }
2902
2903 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2904 {
2905         if (tg3_flag(tp, NVRAM) &&
2906             tg3_flag(tp, NVRAM_BUFFERED) &&
2907             tg3_flag(tp, FLASH) &&
2908             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2909             (tp->nvram_jedecnum == JEDEC_ATMEL))
2910
2911                 addr = ((addr / tp->nvram_pagesize) <<
2912                         ATMEL_AT45DB0X1B_PAGE_POS) +
2913                        (addr % tp->nvram_pagesize);
2914
2915         return addr;
2916 }
2917
2918 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2919 {
2920         if (tg3_flag(tp, NVRAM) &&
2921             tg3_flag(tp, NVRAM_BUFFERED) &&
2922             tg3_flag(tp, FLASH) &&
2923             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2924             (tp->nvram_jedecnum == JEDEC_ATMEL))
2925
2926                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2927                         tp->nvram_pagesize) +
2928                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2929
2930         return addr;
2931 }
2932
2933 /* NOTE: Data read in from NVRAM is byteswapped according to
2934  * the byteswapping settings for all other register accesses.
2935  * tg3 devices are BE devices, so on a BE machine, the data
2936  * returned will be exactly as it is seen in NVRAM.  On a LE
2937  * machine, the 32-bit value will be byteswapped.
2938  */
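     /* For instance (illustration only): if NVRAM holds the bytes
      * de:ad:be:ef at the requested offset, a BE host reads *val as
      * 0xdeadbeef while a LE host reads 0xefbeadde.
      */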
2939 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2940 {
2941         int ret;
2942
2943         if (!tg3_flag(tp, NVRAM))
2944                 return tg3_nvram_read_using_eeprom(tp, offset, val);
2945
2946         offset = tg3_nvram_phys_addr(tp, offset);
2947
2948         if (offset > NVRAM_ADDR_MSK)
2949                 return -EINVAL;
2950
2951         ret = tg3_nvram_lock(tp);
2952         if (ret)
2953                 return ret;
2954
2955         tg3_enable_nvram_access(tp);
2956
2957         tw32(NVRAM_ADDR, offset);
2958         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2959                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2960
2961         if (ret == 0)
2962                 *val = tr32(NVRAM_RDDATA);
2963
2964         tg3_disable_nvram_access(tp);
2965
2966         tg3_nvram_unlock(tp);
2967
2968         return ret;
2969 }
2970
2971 /* Ensures NVRAM data is in bytestream format. */
2972 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2973 {
2974         u32 v;
2975         int res = tg3_nvram_read(tp, offset, &v);
2976         if (!res)
2977                 *val = cpu_to_be32(v);
2978         return res;
2979 }
2980
2981 #define RX_CPU_SCRATCH_BASE     0x30000
2982 #define RX_CPU_SCRATCH_SIZE     0x04000
2983 #define TX_CPU_SCRATCH_BASE     0x34000
2984 #define TX_CPU_SCRATCH_SIZE     0x04000
2985
2986 /* tp->lock is held. */
2987 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
2988 {
2989         int i;
2990
2991         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
2992
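             /* The 5906 has no conventional RX/TX CPUs; its virtual
              * CPU is halted through GRC_VCPU_EXT_CTRL instead.
              */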
2993         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2994                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
2995
2996                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
2997                 return 0;
2998         }
2999         if (offset == RX_CPU_BASE) {
3000                 for (i = 0; i < 10000; i++) {
3001                         tw32(offset + CPU_STATE, 0xffffffff);
3002                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3003                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3004                                 break;
3005                 }
3006
3007                 tw32(offset + CPU_STATE, 0xffffffff);
3008                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
3009                 udelay(10);
3010         } else {
3011                 for (i = 0; i < 10000; i++) {
3012                         tw32(offset + CPU_STATE, 0xffffffff);
3013                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3014                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3015                                 break;
3016                 }
3017         }
3018
3019         if (i >= 10000) {
3020                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3021                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3022                 return -ENODEV;
3023         }
3024
3025         /* Clear firmware's nvram arbitration. */
3026         if (tg3_flag(tp, NVRAM))
3027                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3028         return 0;
3029 }
3030
3031 struct fw_info {
3032         unsigned int fw_base;
3033         unsigned int fw_len;
3034         const __be32 *fw_data;
3035 };
3036
3037 /* tp->lock is held. */
3038 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3039                                  u32 cpu_scratch_base, int cpu_scratch_size,
3040                                  struct fw_info *info)
3041 {
3042         int err, lock_err, i;
3043         void (*write_op)(struct tg3 *, u32, u32);
3044
3045         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3046                 netdev_err(tp->dev,
3047                            "%s: trying to load TX cpu firmware on a 5705-class chip\n",
3048                            __func__);
3049                 return -EINVAL;
3050         }
3051
3052         if (tg3_flag(tp, 5705_PLUS))
3053                 write_op = tg3_write_mem;
3054         else
3055                 write_op = tg3_write_indirect_reg32;
3056
3057         /* It is possible that bootcode is still loading at this point.
3058          * Get the nvram lock before halting the cpu.
3059          */
3060         lock_err = tg3_nvram_lock(tp);
3061         err = tg3_halt_cpu(tp, cpu_base);
3062         if (!lock_err)
3063                 tg3_nvram_unlock(tp);
3064         if (err)
3065                 goto out;
3066
3067         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3068                 write_op(tp, cpu_scratch_base + i, 0);
3069         tw32(cpu_base + CPU_STATE, 0xffffffff);
3070         tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3071         for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3072                 write_op(tp, (cpu_scratch_base +
3073                               (info->fw_base & 0xffff) +
3074                               (i * sizeof(u32))),
3075                               be32_to_cpu(info->fw_data[i]));
3076
3077         err = 0;
3078
3079 out:
3080         return err;
3081 }
3082
3083 /* tp->lock is held. */
3084 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3085 {
3086         struct fw_info info;
3087         const __be32 *fw_data;
3088         int err, i;
3089
3090         fw_data = (void *)tp->fw->data;
3091
3092         /* Firmware blob starts with version numbers, followed by
3093          * start address and length. We are setting complete length.
3094          * length = end_address_of_bss - start_address_of_text.
3095          * Remainder is the blob to be loaded contiguously
3096          * from start address. */
3097
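             /* That is, as implied by the assignments below:
              *   fw_data[0]     version number(s)
              *   fw_data[1]     start address (fw_base)
              *   fw_data[2]     length (recomputed here as tp->fw->size - 12)
              *   fw_data[3]...  blob, loaded contiguously at fw_base
              */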
3098         info.fw_base = be32_to_cpu(fw_data[1]);
3099         info.fw_len = tp->fw->size - 12;
3100         info.fw_data = &fw_data[3];
3101
3102         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3103                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3104                                     &info);
3105         if (err)
3106                 return err;
3107
3108         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3109                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3110                                     &info);
3111         if (err)
3112                 return err;
3113
3114         /* Now start up only the RX cpu. */
3115         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3116         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3117
3118         for (i = 0; i < 5; i++) {
3119                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3120                         break;
3121                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3122                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3123                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3124                 udelay(1000);
3125         }
3126         if (i >= 5) {
3127                 netdev_err(tp->dev, "%s failed to set RX CPU PC: got %08x, "
3128                            "want %08x\n", __func__,
3129                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3130                 return -ENODEV;
3131         }
3132         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3133         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
3134
3135         return 0;
3136 }
3137
3138 /* tp->lock is held. */
3139 static int tg3_load_tso_firmware(struct tg3 *tp)
3140 {
3141         struct fw_info info;
3142         const __be32 *fw_data;
3143         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3144         int err, i;
3145
3146         if (tg3_flag(tp, HW_TSO_1) ||
3147             tg3_flag(tp, HW_TSO_2) ||
3148             tg3_flag(tp, HW_TSO_3))
3149                 return 0;
3150
3151         fw_data = (void *)tp->fw->data;
3152
3153         /* Firmware blob starts with version numbers, followed by
3154          * start address and length. We are setting complete length.
3155          * length = end_address_of_bss - start_address_of_text.
3156          * Remainder is the blob to be loaded contiguously
3157          * from start address. */
3158
3159         info.fw_base = be32_to_cpu(fw_data[1]);
3160         cpu_scratch_size = tp->fw_len;
3161         info.fw_len = tp->fw->size - 12;
3162         info.fw_data = &fw_data[3];
3163
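             /* On 5705 the TSO firmware runs on the RX CPU and is staged
              * in the MBUF pool SRAM; everything else uses the TX CPU and
              * its scratch space.
              */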
3164         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3165                 cpu_base = RX_CPU_BASE;
3166                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3167         } else {
3168                 cpu_base = TX_CPU_BASE;
3169                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3170                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3171         }
3172
3173         err = tg3_load_firmware_cpu(tp, cpu_base,
3174                                     cpu_scratch_base, cpu_scratch_size,
3175                                     &info);
3176         if (err)
3177                 return err;
3178
3179         /* Now start up the cpu. */
3180         tw32(cpu_base + CPU_STATE, 0xffffffff);
3181         tw32_f(cpu_base + CPU_PC, info.fw_base);
3182
3183         for (i = 0; i < 5; i++) {
3184                 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3185                         break;
3186                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3187                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3188                 tw32_f(cpu_base + CPU_PC, info.fw_base);
3189                 udelay(1000);
3190         }
3191         if (i >= 5) {
3192                 netdev_err(tp->dev,
3193                            "%s failed to set CPU PC: got %08x, want %08x\n",
3194                            __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3195                 return -ENODEV;
3196         }
3197         tw32(cpu_base + CPU_STATE, 0xffffffff);
3198         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3199         return 0;
3200 }
3201
3202
3203 /* tp->lock is held. */
3204 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3205 {
3206         u32 addr_high, addr_low;
3207         int i;
3208
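             /* Pack the 6-byte station address into the HIGH:LOW register
              * pair, e.g. 00:10:18:aa:bb:cc gives addr_high 0x0010 and
              * addr_low 0x18aabbcc (illustration only).
              */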
3209         addr_high = ((tp->dev->dev_addr[0] << 8) |
3210                      tp->dev->dev_addr[1]);
3211         addr_low = ((tp->dev->dev_addr[2] << 24) |
3212                     (tp->dev->dev_addr[3] << 16) |
3213                     (tp->dev->dev_addr[4] <<  8) |
3214                     (tp->dev->dev_addr[5] <<  0));
3215         for (i = 0; i < 4; i++) {
3216                 if (i == 1 && skip_mac_1)
3217                         continue;
3218                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3219                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3220         }
3221
3222         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3223             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3224                 for (i = 0; i < 12; i++) {
3225                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3226                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3227                 }
3228         }
3229
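             /* Seed the TX backoff generator with the masked byte sum of
              * the station address, presumably so that stations sharing a
              * segment choose different backoff sequences.
              */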
3230         addr_high = (tp->dev->dev_addr[0] +
3231                      tp->dev->dev_addr[1] +
3232                      tp->dev->dev_addr[2] +
3233                      tp->dev->dev_addr[3] +
3234                      tp->dev->dev_addr[4] +
3235                      tp->dev->dev_addr[5]) &
3236                 TX_BACKOFF_SEED_MASK;
3237         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3238 }
3239
3240 static void tg3_enable_register_access(struct tg3 *tp)
3241 {
3242         /*
3243          * Make sure register accesses (indirect or otherwise) will function
3244          * correctly.
3245          */
3246         pci_write_config_dword(tp->pdev,
3247                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3248 }
3249
3250 static int tg3_power_up(struct tg3 *tp)
3251 {
3252         int err;
3253
3254         tg3_enable_register_access(tp);
3255
3256         err = pci_set_power_state(tp->pdev, PCI_D0);
3257         if (!err) {
3258                 /* Switch out of Vaux if it is a NIC */
3259                 tg3_pwrsrc_switch_to_vmain(tp);
3260         } else {
3261                 netdev_err(tp->dev, "Transition to D0 failed\n");
3262         }
3263
3264         return err;
3265 }
3266
3267 static int tg3_power_down_prepare(struct tg3 *tp)
3268 {
3269         u32 misc_host_ctrl;
3270         bool device_should_wake, do_low_power;
3271
3272         tg3_enable_register_access(tp);
3273
3274         /* Restore the CLKREQ setting. */
3275         if (tg3_flag(tp, CLKREQ_BUG)) {
3276                 u16 lnkctl;
3277
3278                 pci_read_config_word(tp->pdev,
3279                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3280                                      &lnkctl);
3281                 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3282                 pci_write_config_word(tp->pdev,
3283                                       pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3284                                       lnkctl);
3285         }
3286
3287         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3288         tw32(TG3PCI_MISC_HOST_CTRL,
3289              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3290
3291         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3292                              tg3_flag(tp, WOL_ENABLE);
3293
3294         if (tg3_flag(tp, USE_PHYLIB)) {
3295                 do_low_power = false;
3296                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3297                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3298                         struct phy_device *phydev;
3299                         u32 phyid, advertising;
3300
3301                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3302
3303                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3304
3305                         tp->link_config.orig_speed = phydev->speed;
3306                         tp->link_config.orig_duplex = phydev->duplex;
3307                         tp->link_config.orig_autoneg = phydev->autoneg;
3308                         tp->link_config.orig_advertising = phydev->advertising;
3309
3310                         advertising = ADVERTISED_TP |
3311                                       ADVERTISED_Pause |
3312                                       ADVERTISED_Autoneg |
3313                                       ADVERTISED_10baseT_Half;
3314
3315                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3316                                 if (tg3_flag(tp, WOL_SPEED_100MB))
3317                                         advertising |=
3318                                                 ADVERTISED_100baseT_Half |
3319                                                 ADVERTISED_100baseT_Full |
3320                                                 ADVERTISED_10baseT_Full;
3321                                 else
3322                                         advertising |= ADVERTISED_10baseT_Full;
3323                         }
3324
3325                         phydev->advertising = advertising;
3326
3327                         phy_start_aneg(phydev);
3328
3329                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3330                         if (phyid != PHY_ID_BCMAC131) {
3331                                 phyid &= PHY_BCM_OUI_MASK;
3332                                 if (phyid == PHY_BCM_OUI_1 ||
3333                                     phyid == PHY_BCM_OUI_2 ||
3334                                     phyid == PHY_BCM_OUI_3)
3335                                         do_low_power = true;
3336                         }
3337                 }
3338         } else {
3339                 do_low_power = true;
3340
3341                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3342                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3343                         tp->link_config.orig_speed = tp->link_config.speed;
3344                         tp->link_config.orig_duplex = tp->link_config.duplex;
3345                         tp->link_config.orig_autoneg = tp->link_config.autoneg;
3346                 }
3347
3348                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
3349                         tp->link_config.speed = SPEED_10;
3350                         tp->link_config.duplex = DUPLEX_HALF;
3351                         tp->link_config.autoneg = AUTONEG_ENABLE;
3352                         tg3_setup_phy(tp, 0);
3353                 }
3354         }
3355
3356         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3357                 u32 val;
3358
3359                 val = tr32(GRC_VCPU_EXT_CTRL);
3360                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3361         } else if (!tg3_flag(tp, ENABLE_ASF)) {
3362                 int i;
3363                 u32 val;
3364
3365                 for (i = 0; i < 200; i++) {
3366                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3367                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3368                                 break;
3369                         msleep(1);
3370                 }
3371         }
3372         if (tg3_flag(tp, WOL_CAP))
3373                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3374                                                      WOL_DRV_STATE_SHUTDOWN |
3375                                                      WOL_DRV_WOL |
3376                                                      WOL_SET_MAGIC_PKT);
3377
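             /* The device may have to wake the system: keep the MAC
              * running in an appropriate port mode with magic-packet
              * detection enabled.
              */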
3378         if (device_should_wake) {
3379                 u32 mac_mode;
3380
3381                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3382                         if (do_low_power &&
3383                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3384                                 tg3_phy_auxctl_write(tp,
3385                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3386                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
3387                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3388                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3389                                 udelay(40);
3390                         }
3391
3392                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3393                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
3394                         else
3395                                 mac_mode = MAC_MODE_PORT_MODE_MII;
3396
3397                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3398                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3399                             ASIC_REV_5700) {
3400                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3401                                              SPEED_100 : SPEED_10;
3402                                 if (tg3_5700_link_polarity(tp, speed))
3403                                         mac_mode |= MAC_MODE_LINK_POLARITY;
3404                                 else
3405                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
3406                         }
3407                 } else {
3408                         mac_mode = MAC_MODE_PORT_MODE_TBI;
3409                 }
3410
3411                 if (!tg3_flag(tp, 5750_PLUS))
3412                         tw32(MAC_LED_CTRL, tp->led_ctrl);
3413
3414                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3415                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3416                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3417                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3418
3419                 if (tg3_flag(tp, ENABLE_APE))
3420                         mac_mode |= MAC_MODE_APE_TX_EN |
3421                                     MAC_MODE_APE_RX_EN |
3422                                     MAC_MODE_TDE_ENABLE;
3423
3424                 tw32_f(MAC_MODE, mac_mode);
3425                 udelay(100);
3426
3427                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3428                 udelay(10);
3429         }
3430
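             /* Gate the RX/TX clocks (and optionally power down the PLL)
              * to save power.  The 5780 class, CPMU-equipped chips and
              * the 5906 are skipped below.
              */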
3431         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3432             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3433              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3434                 u32 base_val;
3435
3436                 base_val = tp->pci_clock_ctrl;
3437                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3438                              CLOCK_CTRL_TXCLK_DISABLE);
3439
3440                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3441                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
3442         } else if (tg3_flag(tp, 5780_CLASS) ||
3443                    tg3_flag(tp, CPMU_PRESENT) ||
3444                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3445                 /* do nothing */
3446         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3447                 u32 newbits1, newbits2;
3448
3449                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3450                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3451                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3452                                     CLOCK_CTRL_TXCLK_DISABLE |
3453                                     CLOCK_CTRL_ALTCLK);
3454                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3455                 } else if (tg3_flag(tp, 5705_PLUS)) {
3456                         newbits1 = CLOCK_CTRL_625_CORE;
3457                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3458                 } else {
3459                         newbits1 = CLOCK_CTRL_ALTCLK;
3460                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3461                 }
3462
3463                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3464                             40);
3465
3466                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3467                             40);
3468
3469                 if (!tg3_flag(tp, 5705_PLUS)) {
3470                         u32 newbits3;
3471
3472                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3473                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3474                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3475                                             CLOCK_CTRL_TXCLK_DISABLE |
3476                                             CLOCK_CTRL_44MHZ_CORE);
3477                         } else {
3478                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3479                         }
3480
3481                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
3482                                     tp->pci_clock_ctrl | newbits3, 40);
3483                 }
3484         }
3485
3486         if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
3487                 tg3_power_down_phy(tp, do_low_power);
3488
3489         tg3_frob_aux_power(tp, true);
3490
3491         /* Workaround for unstable PLL clock */
3492         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3493             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3494                 u32 val = tr32(0x7d00);
3495
3496                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3497                 tw32(0x7d00, val);
3498                 if (!tg3_flag(tp, ENABLE_ASF)) {
3499                         int err;
3500
3501                         err = tg3_nvram_lock(tp);
3502                         tg3_halt_cpu(tp, RX_CPU_BASE);
3503                         if (!err)
3504                                 tg3_nvram_unlock(tp);
3505                 }
3506         }
3507
3508         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3509
3510         return 0;
3511 }
3512
3513 static void tg3_power_down(struct tg3 *tp)
3514 {
3515         tg3_power_down_prepare(tp);
3516
3517         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3518         pci_set_power_state(tp->pdev, PCI_D3hot);
3519 }
3520
3521 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val,
                                              u16 *speed, u8 *duplex)
3522 {
3523         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3524         case MII_TG3_AUX_STAT_10HALF:
3525                 *speed = SPEED_10;
3526                 *duplex = DUPLEX_HALF;
3527                 break;
3528
3529         case MII_TG3_AUX_STAT_10FULL:
3530                 *speed = SPEED_10;
3531                 *duplex = DUPLEX_FULL;
3532                 break;
3533
3534         case MII_TG3_AUX_STAT_100HALF:
3535                 *speed = SPEED_100;
3536                 *duplex = DUPLEX_HALF;
3537                 break;
3538
3539         case MII_TG3_AUX_STAT_100FULL:
3540                 *speed = SPEED_100;
3541                 *duplex = DUPLEX_FULL;
3542                 break;
3543
3544         case MII_TG3_AUX_STAT_1000HALF:
3545                 *speed = SPEED_1000;
3546                 *duplex = DUPLEX_HALF;
3547                 break;
3548
3549         case MII_TG3_AUX_STAT_1000FULL:
3550                 *speed = SPEED_1000;
3551                 *duplex = DUPLEX_FULL;
3552                 break;
3553
3554         default:
3555                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3556                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3557                                  SPEED_10;
3558                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3559                                   DUPLEX_HALF;
3560                         break;
3561                 }
3562                 *speed = SPEED_INVALID;
3563                 *duplex = DUPLEX_INVALID;
3564                 break;
3565         }
3566 }
3567
3568 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3569 {
3570         int err = 0;
3571         u32 val, new_adv;
3572
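             /* e.g. (illustration only) advertise = ADVERTISED_10baseT_Full |
              * ADVERTISED_100baseT_Full with flowctrl = FLOW_CTRL_TX |
              * FLOW_CTRL_RX yields ADVERTISE_CSMA | ADVERTISE_10FULL |
              * ADVERTISE_100FULL | ADVERTISE_PAUSE_CAP in new_adv.
              */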
3573         new_adv = ADVERTISE_CSMA;
3574         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
3575         new_adv |= mii_advertise_flowctrl(flowctrl);
3576
3577         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3578         if (err)
3579                 goto done;
3580
3581         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3582                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
3583
3584                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3585                     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3586                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3587
3588                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3589                 if (err)
3590                         goto done;
3591         }
3592
3593         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3594                 goto done;
3595
3596         tw32(TG3_CPMU_EEE_MODE,
3597              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3598
3599         err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3600         if (!err) {
3601                 u32 err2;
3602
3603                 val = 0;
3604                 /* Advertise 100-BaseTX EEE ability */
3605                 if (advertise & ADVERTISED_100baseT_Full)
3606                         val |= MDIO_AN_EEE_ADV_100TX;
3607                 /* Advertise 1000-BaseT EEE ability */
3608                 if (advertise & ADVERTISED_1000baseT_Full)
3609                         val |= MDIO_AN_EEE_ADV_1000T;
3610                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3611                 if (err)
3612                         val = 0;
3613
3614                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3615                 case ASIC_REV_5717:
3616                 case ASIC_REV_57765:
3617                 case ASIC_REV_57766:
3618                 case ASIC_REV_5719:
3619                         /* If any EEE modes were advertised above... */
3620                         if (val)
3621                                 val = MII_TG3_DSP_TAP26_ALNOKO |
3622                                       MII_TG3_DSP_TAP26_RMRXSTO |
3623                                       MII_TG3_DSP_TAP26_OPCSINPT;
3624                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3625                         /* Fall through */
3626                 case ASIC_REV_5720:
3627                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3628                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3629                                                  MII_TG3_DSP_CH34TP2_HIBW01);
3630                 }
3631
3632                 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3633                 if (!err)
3634                         err = err2;
3635         }
3636
3637 done:
3638         return err;
3639 }
3640
3641 static void tg3_phy_copper_begin(struct tg3 *tp)
3642 {
3643         u32 new_adv;
3644         int i;
3645
3646         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3647                 new_adv = ADVERTISED_10baseT_Half |
3648                           ADVERTISED_10baseT_Full;
3649                 if (tg3_flag(tp, WOL_SPEED_100MB))
3650                         new_adv |= ADVERTISED_100baseT_Half |
3651                                    ADVERTISED_100baseT_Full;
3652
3653                 tg3_phy_autoneg_cfg(tp, new_adv,
3654                                     FLOW_CTRL_TX | FLOW_CTRL_RX);
3655         } else if (tp->link_config.speed == SPEED_INVALID) {
3656                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3657                         tp->link_config.advertising &=
3658                                 ~(ADVERTISED_1000baseT_Half |
3659                                   ADVERTISED_1000baseT_Full);
3660
3661                 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3662                                     tp->link_config.flowctrl);
3663         } else {
3664                 /* Asking for a specific link mode. */
3665                 if (tp->link_config.speed == SPEED_1000) {
3666                         if (tp->link_config.duplex == DUPLEX_FULL)
3667                                 new_adv = ADVERTISED_1000baseT_Full;
3668                         else
3669                                 new_adv = ADVERTISED_1000baseT_Half;
3670                 } else if (tp->link_config.speed == SPEED_100) {
3671                         if (tp->link_config.duplex == DUPLEX_FULL)
3672                                 new_adv = ADVERTISED_100baseT_Full;
3673                         else
3674                                 new_adv = ADVERTISED_100baseT_Half;
3675                 } else {
3676                         if (tp->link_config.duplex == DUPLEX_FULL)
3677                                 new_adv = ADVERTISED_10baseT_Full;
3678                         else
3679                                 new_adv = ADVERTISED_10baseT_Half;
3680                 }
3681
3682                 tg3_phy_autoneg_cfg(tp, new_adv,
3683                                     tp->link_config.flowctrl);
3684         }
3685
3686         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3687             tp->link_config.speed != SPEED_INVALID) {
3688                 u32 bmcr, orig_bmcr;
3689
3690                 tp->link_config.active_speed = tp->link_config.speed;
3691                 tp->link_config.active_duplex = tp->link_config.duplex;
3692
3693                 bmcr = 0;
3694                 switch (tp->link_config.speed) {
3695                 default:
3696                 case SPEED_10:
3697                         break;
3698
3699                 case SPEED_100:
3700                         bmcr |= BMCR_SPEED100;
3701                         break;
3702
3703                 case SPEED_1000:
3704                         bmcr |= BMCR_SPEED1000;
3705                         break;
3706                 }
3707
3708                 if (tp->link_config.duplex == DUPLEX_FULL)
3709                         bmcr |= BMCR_FULLDPLX;
3710
3711                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3712                     (bmcr != orig_bmcr)) {
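                             /* Kill the link by enabling loopback and
                              * wait for BMSR_LSTATUS to clear before
                              * writing the new forced mode.
                              */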
3713                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3714                         for (i = 0; i < 1500; i++) {
3715                                 u32 tmp;
3716
3717                                 udelay(10);
3718                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3719                                     tg3_readphy(tp, MII_BMSR, &tmp))
3720                                         continue;
3721                                 if (!(tmp & BMSR_LSTATUS)) {
3722                                         udelay(40);
3723                                         break;
3724                                 }
3725                         }
3726                         tg3_writephy(tp, MII_BMCR, bmcr);
3727                         udelay(40);
3728                 }
3729         } else {
3730                 tg3_writephy(tp, MII_BMCR,
3731                              BMCR_ANENABLE | BMCR_ANRESTART);
3732         }
3733 }
3734
3735 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3736 {
3737         int err;
3738
3739         /* Turn off tap power management. */
3740         /* Set Extended packet length bit */
3741         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3742
3743         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3744         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3745         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3746         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3747         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3748
3749         udelay(40);
3750
3751         return err;
3752 }
3753
3754 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
3755 {
3756         u32 advmsk, tgtadv, advertising;
3757
3758         advertising = tp->link_config.advertising;
3759         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
3760
3761         advmsk = ADVERTISE_ALL;
3762         if (tp->link_config.active_duplex == DUPLEX_FULL) {
3763                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
3764                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
3765         }
3766
3767         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3768                 return false;
3769
3770         if ((*lcladv & advmsk) != tgtadv)
3771                 return false;
3772
3773         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3774                 u32 tg3_ctrl;
3775
3776                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
3777
3778                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
3779                         return false;
3780
3781                 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
3782                 if (tg3_ctrl != tgtadv)
3783                         return false;
3784         }
3785
3786         return true;
3787 }
3788
3789 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
3790 {
3791         u32 lpeth = 0;
3792
3793         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3794                 u32 val;
3795
3796                 if (tg3_readphy(tp, MII_STAT1000, &val))
3797                         return false;
3798
3799                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
3800         }
3801
3802         if (tg3_readphy(tp, MII_LPA, rmtadv))
3803                 return false;
3804
3805         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
3806         tp->link_config.rmt_adv = lpeth;
3807
3808         return true;
3809 }
3810
3811 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3812 {
3813         int current_link_up;
3814         u32 bmsr, val;
3815         u32 lcl_adv, rmt_adv;
3816         u16 current_speed;
3817         u8 current_duplex;
3818         int i, err;
3819
3820         tw32(MAC_EVENT, 0);
3821
3822         tw32_f(MAC_STATUS,
3823              (MAC_STATUS_SYNC_CHANGED |
3824               MAC_STATUS_CFG_CHANGED |
3825               MAC_STATUS_MI_COMPLETION |
3826               MAC_STATUS_LNKSTATE_CHANGED));
3827         udelay(40);
3828
3829         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3830                 tw32_f(MAC_MI_MODE,
3831                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3832                 udelay(80);
3833         }
3834
3835         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3836
3837         /* Some third-party PHYs need to be reset on link going
3838          * down.
3839          */
3840         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3841              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3842              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3843             netif_carrier_ok(tp->dev)) {
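                     /* BMSR latches link-fail: the double reads here and
                      * below make the second read return the current
                      * link state.
                      */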
3844                 tg3_readphy(tp, MII_BMSR, &bmsr);
3845                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3846                     !(bmsr & BMSR_LSTATUS))
3847                         force_reset = 1;
3848         }
3849         if (force_reset)
3850                 tg3_phy_reset(tp);
3851
3852         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3853                 tg3_readphy(tp, MII_BMSR, &bmsr);
3854                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3855                     !tg3_flag(tp, INIT_COMPLETE))
3856                         bmsr = 0;
3857
3858                 if (!(bmsr & BMSR_LSTATUS)) {
3859                         err = tg3_init_5401phy_dsp(tp);
3860                         if (err)
3861                                 return err;
3862
3863                         tg3_readphy(tp, MII_BMSR, &bmsr);
3864                         for (i = 0; i < 1000; i++) {
3865                                 udelay(10);
3866                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3867                                     (bmsr & BMSR_LSTATUS)) {
3868                                         udelay(40);
3869                                         break;
3870                                 }
3871                         }
3872
3873                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3874                             TG3_PHY_REV_BCM5401_B0 &&
3875                             !(bmsr & BMSR_LSTATUS) &&
3876                             tp->link_config.active_speed == SPEED_1000) {
3877                                 err = tg3_phy_reset(tp);
3878                                 if (!err)
3879                                         err = tg3_init_5401phy_dsp(tp);
3880                                 if (err)
3881                                         return err;
3882                         }
3883                 }
3884         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3885                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3886                 /* 5701 {A0,B0} CRC bug workaround */
3887                 tg3_writephy(tp, 0x15, 0x0a75);
3888                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3889                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3890                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3891         }
3892
3893         /* Clear pending interrupts... */
3894         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3895         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3896
3897         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3898                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3899         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3900                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3901
3902         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3903             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3904                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3905                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3906                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3907                 else
3908                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3909         }
3910
3911         current_link_up = 0;
3912         current_speed = SPEED_INVALID;
3913         current_duplex = DUPLEX_INVALID;
3914         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
3915         tp->link_config.rmt_adv = 0;
3916
3917         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3918                 err = tg3_phy_auxctl_read(tp,
3919                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3920                                           &val);
3921                 if (!err && !(val & (1 << 10))) {
3922                         tg3_phy_auxctl_write(tp,
3923                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3924                                              val | (1 << 10));
3925                         goto relink;
3926                 }
3927         }
3928
3929         bmsr = 0;
3930         for (i = 0; i < 100; i++) {
3931                 tg3_readphy(tp, MII_BMSR, &bmsr);
3932                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3933                     (bmsr & BMSR_LSTATUS))
3934                         break;
3935                 udelay(40);
3936         }
3937
3938         if (bmsr & BMSR_LSTATUS) {
3939                 u32 aux_stat, bmcr;
3940
3941                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3942                 for (i = 0; i < 2000; i++) {
3943                         udelay(10);
3944                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3945                             aux_stat)
3946                                 break;
3947                 }
3948
3949                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3950                                              &current_speed,
3951                                              &current_duplex);
3952
3953                 bmcr = 0;
3954                 for (i = 0; i < 200; i++) {
3955                         tg3_readphy(tp, MII_BMCR, &bmcr);
3956                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
3957                                 continue;
3958                         if (bmcr && bmcr != 0x7fff)
3959                                 break;
3960                         udelay(10);
3961                 }
3962
3963                 lcl_adv = 0;
3964                 rmt_adv = 0;
3965
3966                 tp->link_config.active_speed = current_speed;
3967                 tp->link_config.active_duplex = current_duplex;
3968
3969                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3970                         if ((bmcr & BMCR_ANENABLE) &&
3971                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
3972                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
3973                                 current_link_up = 1;
3974                 } else {
3975                         if (!(bmcr & BMCR_ANENABLE) &&
3976                             tp->link_config.speed == current_speed &&
3977                             tp->link_config.duplex == current_duplex &&
3978                             tp->link_config.flowctrl ==
3979                             tp->link_config.active_flowctrl) {
3980                                 current_link_up = 1;
3981                         }
3982                 }
3983
3984                 if (current_link_up == 1 &&
3985                     tp->link_config.active_duplex == DUPLEX_FULL) {
3986                         u32 reg, bit;
3987
3988                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3989                                 reg = MII_TG3_FET_GEN_STAT;
3990                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
3991                         } else {
3992                                 reg = MII_TG3_EXT_STAT;
3993                                 bit = MII_TG3_EXT_STAT_MDIX;
3994                         }
3995
3996                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
3997                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
3998
3999                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4000                 }
4001         }
4002
4003 relink:
4004         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4005                 tg3_phy_copper_begin(tp);
4006
4007                 tg3_readphy(tp, MII_BMSR, &bmsr);
4008                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4009                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4010                         current_link_up = 1;
4011         }
4012
4013         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4014         if (current_link_up == 1) {
4015                 if (tp->link_config.active_speed == SPEED_100 ||
4016                     tp->link_config.active_speed == SPEED_10)
4017                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4018                 else
4019                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4020         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4021                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4022         else
4023                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4024
4025         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4026         if (tp->link_config.active_duplex == DUPLEX_HALF)
4027                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4028
4029         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4030                 if (current_link_up == 1 &&
4031                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4032                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4033                 else
4034                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4035         }
4036
4037         /* ??? Without this setting Netgear GA302T PHY does not
4038          * ??? send/receive packets...
4039          */
4040         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4041             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4042                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4043                 tw32_f(MAC_MI_MODE, tp->mi_mode);
4044                 udelay(80);
4045         }
4046
4047         tw32_f(MAC_MODE, tp->mac_mode);
4048         udelay(40);
4049
4050         tg3_phy_eee_adjust(tp, current_link_up);
4051
4052         if (tg3_flag(tp, USE_LINKCHG_REG)) {
4053                 /* Polled via timer. */
4054                 tw32_f(MAC_EVENT, 0);
4055         } else {
4056                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4057         }
4058         udelay(40);
4059
4060         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4061             current_link_up == 1 &&
4062             tp->link_config.active_speed == SPEED_1000 &&
4063             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4064                 udelay(120);
4065                 tw32_f(MAC_STATUS,
4066                      (MAC_STATUS_SYNC_CHANGED |
4067                       MAC_STATUS_CFG_CHANGED));
4068                 udelay(40);
4069                 tg3_write_mem(tp,
4070                               NIC_SRAM_FIRMWARE_MBOX,
4071                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4072         }
4073
4074         /* Prevent send BD corruption. */
4075         if (tg3_flag(tp, CLKREQ_BUG)) {
4076                 u16 oldlnkctl, newlnkctl;
4077
4078                 pci_read_config_word(tp->pdev,
4079                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4080                                      &oldlnkctl);
4081                 if (tp->link_config.active_speed == SPEED_100 ||
4082                     tp->link_config.active_speed == SPEED_10)
4083                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4084                 else
4085                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4086                 if (newlnkctl != oldlnkctl)
4087                         pci_write_config_word(tp->pdev,
4088                                               pci_pcie_cap(tp->pdev) +
4089                                               PCI_EXP_LNKCTL, newlnkctl);
4090         }
4091
4092         if (current_link_up != netif_carrier_ok(tp->dev)) {
4093                 if (current_link_up)
4094                         netif_carrier_on(tp->dev);
4095                 else
4096                         netif_carrier_off(tp->dev);
4097                 tg3_link_report(tp);
4098         }
4099
4100         return 0;
4101 }
4102
4103 struct tg3_fiber_aneginfo {
4104         int state;
4105 #define ANEG_STATE_UNKNOWN              0
4106 #define ANEG_STATE_AN_ENABLE            1
4107 #define ANEG_STATE_RESTART_INIT         2
4108 #define ANEG_STATE_RESTART              3
4109 #define ANEG_STATE_DISABLE_LINK_OK      4
4110 #define ANEG_STATE_ABILITY_DETECT_INIT  5
4111 #define ANEG_STATE_ABILITY_DETECT       6
4112 #define ANEG_STATE_ACK_DETECT_INIT      7
4113 #define ANEG_STATE_ACK_DETECT           8
4114 #define ANEG_STATE_COMPLETE_ACK_INIT    9
4115 #define ANEG_STATE_COMPLETE_ACK         10
4116 #define ANEG_STATE_IDLE_DETECT_INIT     11
4117 #define ANEG_STATE_IDLE_DETECT          12
4118 #define ANEG_STATE_LINK_OK              13
4119 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
4120 #define ANEG_STATE_NEXT_PAGE_WAIT       15
4121
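             /* The MR_* names below appear to follow the management
              * variables of the IEEE 802.3 clause 37 autoneg state
              * machine (assumption based on naming).
              */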
4122         u32 flags;
4123 #define MR_AN_ENABLE            0x00000001
4124 #define MR_RESTART_AN           0x00000002
4125 #define MR_AN_COMPLETE          0x00000004
4126 #define MR_PAGE_RX              0x00000008
4127 #define MR_NP_LOADED            0x00000010
4128 #define MR_TOGGLE_TX            0x00000020
4129 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
4130 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
4131 #define MR_LP_ADV_SYM_PAUSE     0x00000100
4132 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
4133 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4134 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4135 #define MR_LP_ADV_NEXT_PAGE     0x00001000
4136 #define MR_TOGGLE_RX            0x00002000
4137 #define MR_NP_RX                0x00004000
4138
4139 #define MR_LINK_OK              0x80000000
4140
4141         unsigned long link_time, cur_time;
4142
4143         u32 ability_match_cfg;
4144         int ability_match_count;
4145
4146         char ability_match, idle_match, ack_match;
4147
4148         u32 txconfig, rxconfig;
4149 #define ANEG_CFG_NP             0x00000080
4150 #define ANEG_CFG_ACK            0x00000040
4151 #define ANEG_CFG_RF2            0x00000020
4152 #define ANEG_CFG_RF1            0x00000010
4153 #define ANEG_CFG_PS2            0x00000001
4154 #define ANEG_CFG_PS1            0x00008000
4155 #define ANEG_CFG_HD             0x00004000
4156 #define ANEG_CFG_FD             0x00002000
4157 #define ANEG_CFG_INVAL          0x00001f06
4158
4159 };
4160 #define ANEG_OK         0
4161 #define ANEG_DONE       1
4162 #define ANEG_TIMER_ENAB 2
4163 #define ANEG_FAILED     -1
4164
4165 #define ANEG_STATE_SETTLE_TIME  10000
4166
4167 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4168                                    struct tg3_fiber_aneginfo *ap)
4169 {
4170         u16 flowctrl;
4171         unsigned long delta;
4172         u32 rx_cfg_reg;
4173         int ret;
4174
4175         if (ap->state == ANEG_STATE_UNKNOWN) {
4176                 ap->rxconfig = 0;
4177                 ap->link_time = 0;
4178                 ap->cur_time = 0;
4179                 ap->ability_match_cfg = 0;
4180                 ap->ability_match_count = 0;
4181                 ap->ability_match = 0;
4182                 ap->idle_match = 0;
4183                 ap->ack_match = 0;
4184         }
4185         ap->cur_time++;
4186
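             /* A received config word is only trusted once it has been
              * sampled unchanged on consecutive passes (ability_match);
              * receiving no config words at all counts as idle.
              */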
4187         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4188                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4189
4190                 if (rx_cfg_reg != ap->ability_match_cfg) {
4191                         ap->ability_match_cfg = rx_cfg_reg;
4192                         ap->ability_match = 0;
4193                         ap->ability_match_count = 0;
4194                 } else {
4195                         if (++ap->ability_match_count > 1) {
4196                                 ap->ability_match = 1;
4197                                 ap->ability_match_cfg = rx_cfg_reg;
4198                         }
4199                 }
4200                 if (rx_cfg_reg & ANEG_CFG_ACK)
4201                         ap->ack_match = 1;
4202                 else
4203                         ap->ack_match = 0;
4204
4205                 ap->idle_match = 0;
4206         } else {
4207                 ap->idle_match = 1;
4208                 ap->ability_match_cfg = 0;
4209                 ap->ability_match_count = 0;
4210                 ap->ability_match = 0;
4211                 ap->ack_match = 0;
4212
4213                 rx_cfg_reg = 0;
4214         }
4215
4216         ap->rxconfig = rx_cfg_reg;
4217         ret = ANEG_OK;
4218
4219         switch (ap->state) {
4220         case ANEG_STATE_UNKNOWN:
4221                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4222                         ap->state = ANEG_STATE_AN_ENABLE;
4223
4224                 /* Fall through */
4225         case ANEG_STATE_AN_ENABLE:
4226                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4227                 if (ap->flags & MR_AN_ENABLE) {
4228                         ap->link_time = 0;
4229                         ap->cur_time = 0;
4230                         ap->ability_match_cfg = 0;
4231                         ap->ability_match_count = 0;
4232                         ap->ability_match = 0;
4233                         ap->idle_match = 0;
4234                         ap->ack_match = 0;
4235
4236                         ap->state = ANEG_STATE_RESTART_INIT;
4237                 } else {
4238                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
4239                 }
4240                 break;
4241
4242         case ANEG_STATE_RESTART_INIT:
4243                 ap->link_time = ap->cur_time;
4244                 ap->flags &= ~(MR_NP_LOADED);
4245                 ap->txconfig = 0;
4246                 tw32(MAC_TX_AUTO_NEG, 0);
4247                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4248                 tw32_f(MAC_MODE, tp->mac_mode);
4249                 udelay(40);
4250
4251                 ret = ANEG_TIMER_ENAB;
4252                 ap->state = ANEG_STATE_RESTART;
4253
4254                 /* Fall through */
4255         case ANEG_STATE_RESTART:
4256                 delta = ap->cur_time - ap->link_time;
4257                 if (delta > ANEG_STATE_SETTLE_TIME)
4258                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4259                 else
4260                         ret = ANEG_TIMER_ENAB;
4261                 break;
4262
4263         case ANEG_STATE_DISABLE_LINK_OK:
4264                 ret = ANEG_DONE;
4265                 break;
4266
4267         case ANEG_STATE_ABILITY_DETECT_INIT:
4268                 ap->flags &= ~(MR_TOGGLE_TX);
4269                 ap->txconfig = ANEG_CFG_FD;
4270                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4271                 if (flowctrl & ADVERTISE_1000XPAUSE)
4272                         ap->txconfig |= ANEG_CFG_PS1;
4273                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4274                         ap->txconfig |= ANEG_CFG_PS2;
4275                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4276                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4277                 tw32_f(MAC_MODE, tp->mac_mode);
4278                 udelay(40);
4279
4280                 ap->state = ANEG_STATE_ABILITY_DETECT;
4281                 break;
4282
4283         case ANEG_STATE_ABILITY_DETECT:
4284                 if (ap->ability_match != 0 && ap->rxconfig != 0)
4285                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
4286                 break;
4287
4288         case ANEG_STATE_ACK_DETECT_INIT:
4289                 ap->txconfig |= ANEG_CFG_ACK;
4290                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4291                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4292                 tw32_f(MAC_MODE, tp->mac_mode);
4293                 udelay(40);
4294
4295                 ap->state = ANEG_STATE_ACK_DETECT;
4296
4297                 /* fallthru */
4298         case ANEG_STATE_ACK_DETECT:
4299                 if (ap->ack_match != 0) {
4300                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4301                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4302                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4303                         } else {
4304                                 ap->state = ANEG_STATE_AN_ENABLE;
4305                         }
4306                 } else if (ap->ability_match != 0 &&
4307                            ap->rxconfig == 0) {
4308                         ap->state = ANEG_STATE_AN_ENABLE;
4309                 }
4310                 break;
4311
4312         case ANEG_STATE_COMPLETE_ACK_INIT:
4313                 if (ap->rxconfig & ANEG_CFG_INVAL) {
4314                         ret = ANEG_FAILED;
4315                         break;
4316                 }
4317                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4318                                MR_LP_ADV_HALF_DUPLEX |
4319                                MR_LP_ADV_SYM_PAUSE |
4320                                MR_LP_ADV_ASYM_PAUSE |
4321                                MR_LP_ADV_REMOTE_FAULT1 |
4322                                MR_LP_ADV_REMOTE_FAULT2 |
4323                                MR_LP_ADV_NEXT_PAGE |
4324                                MR_TOGGLE_RX |
4325                                MR_NP_RX);
4326                 if (ap->rxconfig & ANEG_CFG_FD)
4327                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4328                 if (ap->rxconfig & ANEG_CFG_HD)
4329                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4330                 if (ap->rxconfig & ANEG_CFG_PS1)
4331                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
4332                 if (ap->rxconfig & ANEG_CFG_PS2)
4333                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4334                 if (ap->rxconfig & ANEG_CFG_RF1)
4335                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4336                 if (ap->rxconfig & ANEG_CFG_RF2)
4337                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4338                 if (ap->rxconfig & ANEG_CFG_NP)
4339                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
4340
4341                 ap->link_time = ap->cur_time;
4342
4343                 ap->flags ^= (MR_TOGGLE_TX);
4344                 if (ap->rxconfig & 0x0008)      /* received toggle bit */
4345                         ap->flags |= MR_TOGGLE_RX;
4346                 if (ap->rxconfig & ANEG_CFG_NP)
4347                         ap->flags |= MR_NP_RX;
4348                 ap->flags |= MR_PAGE_RX;
4349
4350                 ap->state = ANEG_STATE_COMPLETE_ACK;
4351                 ret = ANEG_TIMER_ENAB;
4352                 break;
4353
4354         case ANEG_STATE_COMPLETE_ACK:
4355                 if (ap->ability_match != 0 &&
4356                     ap->rxconfig == 0) {
4357                         ap->state = ANEG_STATE_AN_ENABLE;
4358                         break;
4359                 }
4360                 delta = ap->cur_time - ap->link_time;
4361                 if (delta > ANEG_STATE_SETTLE_TIME) {
4362                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4363                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4364                         } else {
4365                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4366                                     !(ap->flags & MR_NP_RX)) {
4367                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4368                                 } else {
4369                                         ret = ANEG_FAILED;
4370                                 }
4371                         }
4372                 }
4373                 break;
4374
4375         case ANEG_STATE_IDLE_DETECT_INIT:
4376                 ap->link_time = ap->cur_time;
4377                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4378                 tw32_f(MAC_MODE, tp->mac_mode);
4379                 udelay(40);
4380
4381                 ap->state = ANEG_STATE_IDLE_DETECT;
4382                 ret = ANEG_TIMER_ENAB;
4383                 break;
4384
4385         case ANEG_STATE_IDLE_DETECT:
4386                 if (ap->ability_match != 0 &&
4387                     ap->rxconfig == 0) {
4388                         ap->state = ANEG_STATE_AN_ENABLE;
4389                         break;
4390                 }
4391                 delta = ap->cur_time - ap->link_time;
4392                 if (delta > ANEG_STATE_SETTLE_TIME) {
4393                         /* XXX another gem from the Broadcom driver :( */
4394                         ap->state = ANEG_STATE_LINK_OK;
4395                 }
4396                 break;
4397
4398         case ANEG_STATE_LINK_OK:
4399                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4400                 ret = ANEG_DONE;
4401                 break;
4402
4403         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4404                 /* ??? unimplemented */
4405                 break;
4406
4407         case ANEG_STATE_NEXT_PAGE_WAIT:
4408                 /* ??? unimplemented */
4409                 break;
4410
4411         default:
4412                 ret = ANEG_FAILED;
4413                 break;
4414         }
4415
4416         return ret;
4417 }
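
/* On the return values of the state machine above, as consumed by
 * fiber_autoneg() below: ANEG_DONE and ANEG_FAILED terminate the
 * polling loop, while ANEG_OK and ANEG_TIMER_ENAB both mean "call
 * again".  ANEG_TIMER_ENAB additionally indicates that a settle
 * timer is running, i.e. the state compares ap->cur_time against
 * ap->link_time on subsequent passes.
 */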
4418
4419 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4420 {
4421         int res = 0;
4422         struct tg3_fiber_aneginfo aninfo;
4423         int status = ANEG_FAILED;
4424         unsigned int tick;
4425         u32 tmp;
4426
4427         tw32_f(MAC_TX_AUTO_NEG, 0);
4428
4429         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4430         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4431         udelay(40);
4432
4433         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4434         udelay(40);
4435
4436         memset(&aninfo, 0, sizeof(aninfo));
4437         aninfo.flags |= MR_AN_ENABLE;
4438         aninfo.state = ANEG_STATE_UNKNOWN;
4439         aninfo.cur_time = 0;
4440         tick = 0;
4441         while (++tick < 195000) {
4442                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4443                 if (status == ANEG_DONE || status == ANEG_FAILED)
4444                         break;
4445
4446                 udelay(1);
4447         }
4448
4449         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4450         tw32_f(MAC_MODE, tp->mac_mode);
4451         udelay(40);
4452
4453         *txflags = aninfo.txconfig;
4454         *rxflags = aninfo.flags;
4455
4456         if (status == ANEG_DONE &&
4457             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4458                              MR_LP_ADV_FULL_DUPLEX)))
4459                 res = 1;
4460
4461         return res;
4462 }
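
/* The polling loop above is bounded at 195000 iterations with a
 * udelay(1) per pass, so software autoneg busy-waits for roughly
 * 200 ms at most before the outcome in aninfo.flags is evaluated.
 */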
4463
4464 static void tg3_init_bcm8002(struct tg3 *tp)
4465 {
4466         u32 mac_status = tr32(MAC_STATUS);
4467         int i;
4468
4469         /* Reset when initting for the first time or when we have a link. */
4470         if (tg3_flag(tp, INIT_COMPLETE) &&
4471             !(mac_status & MAC_STATUS_PCS_SYNCED))
4472                 return;
4473
4474         /* Set PLL lock range. */
4475         tg3_writephy(tp, 0x16, 0x8007);
4476
4477         /* SW reset */
4478         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4479
4480         /* Wait for reset to complete. */
4481         /* XXX schedule_timeout() ... */
4482         for (i = 0; i < 500; i++)
4483                 udelay(10);
4484
4485         /* Config mode; select PMA/Ch 1 regs. */
4486         tg3_writephy(tp, 0x10, 0x8411);
4487
4488         /* Enable auto-lock and comdet, select txclk for tx. */
4489         tg3_writephy(tp, 0x11, 0x0a10);
4490
4491         tg3_writephy(tp, 0x18, 0x00a0);
4492         tg3_writephy(tp, 0x16, 0x41ff);
4493
4494         /* Assert and deassert POR. */
4495         tg3_writephy(tp, 0x13, 0x0400);
4496         udelay(40);
4497         tg3_writephy(tp, 0x13, 0x0000);
4498
4499         tg3_writephy(tp, 0x11, 0x0a50);
4500         udelay(40);
4501         tg3_writephy(tp, 0x11, 0x0a10);
4502
4503         /* Wait for signal to stabilize */
4504         /* XXX schedule_timeout() ... */
4505         for (i = 0; i < 15000; i++)
4506                 udelay(10);
4507
4508         /* Deselect the channel register so we can read the PHYID
4509          * later.
4510          */
4511         tg3_writephy(tp, 0x10, 0x8011);
4512 }
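
/* The XXX markers above flag busy-wait loops: 500 * udelay(10) is a
 * 5 ms wait for the PHY reset, 15000 * udelay(10) a 150 ms wait for
 * the signal to stabilize.  The sleeping alternative they hint at
 * would be, assuming this path always ran in a context that may
 * sleep:
 *
 *      msleep(5);      for the reset wait
 *      msleep(150);    for the stabilization wait
 *
 * udelay() is kept here, presumably because callers may hold locks.
 */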
4513
4514 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4515 {
4516         u16 flowctrl;
4517         u32 sg_dig_ctrl, sg_dig_status;
4518         u32 serdes_cfg, expected_sg_dig_ctrl;
4519         int workaround, port_a;
4520         int current_link_up;
4521
4522         serdes_cfg = 0;
4523         expected_sg_dig_ctrl = 0;
4524         workaround = 0;
4525         port_a = 1;
4526         current_link_up = 0;
4527
4528         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4529             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4530                 workaround = 1;
4531                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4532                         port_a = 0;
4533
4534                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4535                 /* preserve bits 20-23 for voltage regulator */
4536                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4537         }
4538
4539         sg_dig_ctrl = tr32(SG_DIG_CTRL);
4540
4541         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4542                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4543                         if (workaround) {
4544                                 u32 val = serdes_cfg;
4545
4546                                 if (port_a)
4547                                         val |= 0xc010000;
4548                                 else
4549                                         val |= 0x4010000;
4550                                 tw32_f(MAC_SERDES_CFG, val);
4551                         }
4552
4553                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4554                 }
4555                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4556                         tg3_setup_flow_control(tp, 0, 0);
4557                         current_link_up = 1;
4558                 }
4559                 goto out;
4560         }
4561
4562         /* Want auto-negotiation.  */
4563         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4564
4565         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4566         if (flowctrl & ADVERTISE_1000XPAUSE)
4567                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4568         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4569                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4570
4571         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4572                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4573                     tp->serdes_counter &&
4574                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
4575                                     MAC_STATUS_RCVD_CFG)) ==
4576                      MAC_STATUS_PCS_SYNCED)) {
4577                         tp->serdes_counter--;
4578                         current_link_up = 1;
4579                         goto out;
4580                 }
4581 restart_autoneg:
4582                 if (workaround)
4583                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4584                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4585                 udelay(5);
4586                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4587
4588                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4589                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4590         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4591                                  MAC_STATUS_SIGNAL_DET)) {
4592                 sg_dig_status = tr32(SG_DIG_STATUS);
4593                 mac_status = tr32(MAC_STATUS);
4594
4595                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4596                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
4597                         u32 local_adv = 0, remote_adv = 0;
4598
4599                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4600                                 local_adv |= ADVERTISE_1000XPAUSE;
4601                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4602                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4603
4604                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4605                                 remote_adv |= LPA_1000XPAUSE;
4606                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4607                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4608
4609                         tp->link_config.rmt_adv =
4610                                            mii_adv_to_ethtool_adv_x(remote_adv);
4611
4612                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4613                         current_link_up = 1;
4614                         tp->serdes_counter = 0;
4615                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4616                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4617                         if (tp->serdes_counter)
4618                                 tp->serdes_counter--;
4619                         else {
4620                                 if (workaround) {
4621                                         u32 val = serdes_cfg;
4622
4623                                         if (port_a)
4624                                                 val |= 0xc010000;
4625                                         else
4626                                                 val |= 0x4010000;
4627
4628                                         tw32_f(MAC_SERDES_CFG, val);
4629                                 }
4630
4631                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4632                                 udelay(40);
4633
4634                                 /* Link parallel detection - link is up
4635                                  * only if we have PCS_SYNC and are not
4636                                  * receiving config code words.  */
4637                                 mac_status = tr32(MAC_STATUS);
4638                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4639                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4640                                         tg3_setup_flow_control(tp, 0, 0);
4641                                         current_link_up = 1;
4642                                         tp->phy_flags |=
4643                                                 TG3_PHYFLG_PARALLEL_DETECT;
4644                                         tp->serdes_counter =
4645                                                 SERDES_PARALLEL_DET_TIMEOUT;
4646                                 } else
4647                                         goto restart_autoneg;
4648                         }
4649                 }
4650         } else {
4651                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4652                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4653         }
4654
4655 out:
4656         return current_link_up;
4657 }
4658
4659 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4660 {
4661         int current_link_up = 0;
4662
4663         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4664                 goto out;
4665
4666         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4667                 u32 txflags, rxflags;
4668                 int i;
4669
4670                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4671                         u32 local_adv = 0, remote_adv = 0;
4672
4673                         if (txflags & ANEG_CFG_PS1)
4674                                 local_adv |= ADVERTISE_1000XPAUSE;
4675                         if (txflags & ANEG_CFG_PS2)
4676                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4677
4678                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
4679                                 remote_adv |= LPA_1000XPAUSE;
4680                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4681                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4682
4683                         tp->link_config.rmt_adv =
4684                                            mii_adv_to_ethtool_adv_x(remote_adv);
4685
4686                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4687
4688                         current_link_up = 1;
4689                 }
4690                 for (i = 0; i < 30; i++) {
4691                         udelay(20);
4692                         tw32_f(MAC_STATUS,
4693                                (MAC_STATUS_SYNC_CHANGED |
4694                                 MAC_STATUS_CFG_CHANGED));
4695                         udelay(40);
4696                         if ((tr32(MAC_STATUS) &
4697                              (MAC_STATUS_SYNC_CHANGED |
4698                               MAC_STATUS_CFG_CHANGED)) == 0)
4699                                 break;
4700                 }
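
                /* SYNC_CHANGED and CFG_CHANGED appear to be latched,
                 * write-one-to-clear status bits: the loop above writes them
                 * back and re-reads until they stay clear, i.e. until the
                 * link state stops changing (at most 30 attempts).
                 */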
4701
4702                 mac_status = tr32(MAC_STATUS);
4703                 if (current_link_up == 0 &&
4704                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
4705                     !(mac_status & MAC_STATUS_RCVD_CFG))
4706                         current_link_up = 1;
4707         } else {
4708                 tg3_setup_flow_control(tp, 0, 0);
4709
4710                 /* Forcing 1000FD link up. */
4711                 current_link_up = 1;
4712
4713                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4714                 udelay(40);
4715
4716                 tw32_f(MAC_MODE, tp->mac_mode);
4717                 udelay(40);
4718         }
4719
4720 out:
4721         return current_link_up;
4722 }
4723
4724 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4725 {
4726         u32 orig_pause_cfg;
4727         u16 orig_active_speed;
4728         u8 orig_active_duplex;
4729         u32 mac_status;
4730         int current_link_up;
4731         int i;
4732
4733         orig_pause_cfg = tp->link_config.active_flowctrl;
4734         orig_active_speed = tp->link_config.active_speed;
4735         orig_active_duplex = tp->link_config.active_duplex;
4736
4737         if (!tg3_flag(tp, HW_AUTONEG) &&
4738             netif_carrier_ok(tp->dev) &&
4739             tg3_flag(tp, INIT_COMPLETE)) {
4740                 mac_status = tr32(MAC_STATUS);
4741                 mac_status &= (MAC_STATUS_PCS_SYNCED |
4742                                MAC_STATUS_SIGNAL_DET |
4743                                MAC_STATUS_CFG_CHANGED |
4744                                MAC_STATUS_RCVD_CFG);
4745                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4746                                    MAC_STATUS_SIGNAL_DET)) {
4747                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4748                                             MAC_STATUS_CFG_CHANGED));
4749                         return 0;
4750                 }
4751         }
4752
4753         tw32_f(MAC_TX_AUTO_NEG, 0);
4754
4755         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4756         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4757         tw32_f(MAC_MODE, tp->mac_mode);
4758         udelay(40);
4759
4760         if (tp->phy_id == TG3_PHY_ID_BCM8002)
4761                 tg3_init_bcm8002(tp);
4762
4763         /* Enable link change event even when serdes polling.  */
4764         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4765         udelay(40);
4766
4767         current_link_up = 0;
4768         tp->link_config.rmt_adv = 0;
4769         mac_status = tr32(MAC_STATUS);
4770
4771         if (tg3_flag(tp, HW_AUTONEG))
4772                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4773         else
4774                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4775
4776         tp->napi[0].hw_status->status =
4777                 (SD_STATUS_UPDATED |
4778                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4779
4780         for (i = 0; i < 100; i++) {
4781                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4782                                     MAC_STATUS_CFG_CHANGED));
4783                 udelay(5);
4784                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4785                                          MAC_STATUS_CFG_CHANGED |
4786                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4787                         break;
4788         }
4789
4790         mac_status = tr32(MAC_STATUS);
4791         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4792                 current_link_up = 0;
4793                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4794                     tp->serdes_counter == 0) {
4795                         tw32_f(MAC_MODE, (tp->mac_mode |
4796                                           MAC_MODE_SEND_CONFIGS));
4797                         udelay(1);
4798                         tw32_f(MAC_MODE, tp->mac_mode);
4799                 }
4800         }
4801
4802         if (current_link_up == 1) {
4803                 tp->link_config.active_speed = SPEED_1000;
4804                 tp->link_config.active_duplex = DUPLEX_FULL;
4805                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4806                                     LED_CTRL_LNKLED_OVERRIDE |
4807                                     LED_CTRL_1000MBPS_ON));
4808         } else {
4809                 tp->link_config.active_speed = SPEED_INVALID;
4810                 tp->link_config.active_duplex = DUPLEX_INVALID;
4811                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4812                                     LED_CTRL_LNKLED_OVERRIDE |
4813                                     LED_CTRL_TRAFFIC_OVERRIDE));
4814         }
4815
4816         if (current_link_up != netif_carrier_ok(tp->dev)) {
4817                 if (current_link_up)
4818                         netif_carrier_on(tp->dev);
4819                 else
4820                         netif_carrier_off(tp->dev);
4821                 tg3_link_report(tp);
4822         } else {
4823                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4824                 if (orig_pause_cfg != now_pause_cfg ||
4825                     orig_active_speed != tp->link_config.active_speed ||
4826                     orig_active_duplex != tp->link_config.active_duplex)
4827                         tg3_link_report(tp);
4828         }
4829
4830         return 0;
4831 }
4832
4833 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4834 {
4835         int current_link_up, err = 0;
4836         u32 bmsr, bmcr;
4837         u16 current_speed;
4838         u8 current_duplex;
4839         u32 local_adv, remote_adv;
4840
4841         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4842         tw32_f(MAC_MODE, tp->mac_mode);
4843         udelay(40);
4844
4845         tw32(MAC_EVENT, 0);
4846
4847         tw32_f(MAC_STATUS,
4848              (MAC_STATUS_SYNC_CHANGED |
4849               MAC_STATUS_CFG_CHANGED |
4850               MAC_STATUS_MI_COMPLETION |
4851               MAC_STATUS_LNKSTATE_CHANGED));
4852         udelay(40);
4853
4854         if (force_reset)
4855                 tg3_phy_reset(tp);
4856
4857         current_link_up = 0;
4858         current_speed = SPEED_INVALID;
4859         current_duplex = DUPLEX_INVALID;
4860         tp->link_config.rmt_adv = 0;
4861
4862         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4863         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4864         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4865                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4866                         bmsr |= BMSR_LSTATUS;
4867                 else
4868                         bmsr &= ~BMSR_LSTATUS;
4869         }
4870
4871         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4872
4873         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4874             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4875                 /* do nothing, just check for link up at the end */
4876         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4877                 u32 adv, newadv;
4878
4879                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4880                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4881                                  ADVERTISE_1000XPAUSE |
4882                                  ADVERTISE_1000XPSE_ASYM |
4883                                  ADVERTISE_SLCT);
4884
4885                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4886                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
4887
4888                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
4889                         tg3_writephy(tp, MII_ADVERTISE, newadv);
4890                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4891                         tg3_writephy(tp, MII_BMCR, bmcr);
4892
4893                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4894                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4895                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4896
4897                         return err;
4898                 }
4899         } else {
4900                 u32 new_bmcr;
4901
4902                 bmcr &= ~BMCR_SPEED1000;
4903                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4904
4905                 if (tp->link_config.duplex == DUPLEX_FULL)
4906                         new_bmcr |= BMCR_FULLDPLX;
4907
4908                 if (new_bmcr != bmcr) {
4909                         /* BMCR_SPEED1000 is a reserved bit that needs
4910                          * to be set on write.
4911                          */
4912                         new_bmcr |= BMCR_SPEED1000;
4913
4914                         /* Force a linkdown */
4915                         if (netif_carrier_ok(tp->dev)) {
4916                                 u32 adv;
4917
4918                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4919                                 adv &= ~(ADVERTISE_1000XFULL |
4920                                          ADVERTISE_1000XHALF |
4921                                          ADVERTISE_SLCT);
4922                                 tg3_writephy(tp, MII_ADVERTISE, adv);
4923                                 tg3_writephy(tp, MII_BMCR, bmcr |
4924                                                            BMCR_ANRESTART |
4925                                                            BMCR_ANENABLE);
4926                                 udelay(10);
4927                                 netif_carrier_off(tp->dev);
4928                         }
4929                         tg3_writephy(tp, MII_BMCR, new_bmcr);
4930                         bmcr = new_bmcr;
4931                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4932                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4933                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4934                             ASIC_REV_5714) {
4935                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4936                                         bmsr |= BMSR_LSTATUS;
4937                                 else
4938                                         bmsr &= ~BMSR_LSTATUS;
4939                         }
4940                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4941                 }
4942         }
4943
4944         if (bmsr & BMSR_LSTATUS) {
4945                 current_speed = SPEED_1000;
4946                 current_link_up = 1;
4947                 if (bmcr & BMCR_FULLDPLX)
4948                         current_duplex = DUPLEX_FULL;
4949                 else
4950                         current_duplex = DUPLEX_HALF;
4951
4952                 local_adv = 0;
4953                 remote_adv = 0;
4954
4955                 if (bmcr & BMCR_ANENABLE) {
4956                         u32 common;
4957
4958                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4959                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4960                         common = local_adv & remote_adv;
4961                         if (common & (ADVERTISE_1000XHALF |
4962                                       ADVERTISE_1000XFULL)) {
4963                                 if (common & ADVERTISE_1000XFULL)
4964                                         current_duplex = DUPLEX_FULL;
4965                                 else
4966                                         current_duplex = DUPLEX_HALF;
4967
4968                                 tp->link_config.rmt_adv =
4969                                            mii_adv_to_ethtool_adv_x(remote_adv);
4970                         } else if (!tg3_flag(tp, 5780_CLASS)) {
4971                                 /* Link is up via parallel detect */
4972                         } else {
4973                                 current_link_up = 0;
4974                         }
4975                 }
4976         }
4977
4978         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4979                 tg3_setup_flow_control(tp, local_adv, remote_adv);
4980
4981         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4982         if (tp->link_config.active_duplex == DUPLEX_HALF)
4983                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4984
4985         tw32_f(MAC_MODE, tp->mac_mode);
4986         udelay(40);
4987
4988         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4989
4990         tp->link_config.active_speed = current_speed;
4991         tp->link_config.active_duplex = current_duplex;
4992
4993         if (current_link_up != netif_carrier_ok(tp->dev)) {
4994                 if (current_link_up)
4995                         netif_carrier_on(tp->dev);
4996                 else {
4997                         netif_carrier_off(tp->dev);
4998                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4999                 }
5000                 tg3_link_report(tp);
5001         }
5002         return err;
5003 }
5004
5005 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5006 {
5007         if (tp->serdes_counter) {
5008                 /* Give autoneg time to complete. */
5009                 tp->serdes_counter--;
5010                 return;
5011         }
5012
5013         if (!netif_carrier_ok(tp->dev) &&
5014             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5015                 u32 bmcr;
5016
5017                 tg3_readphy(tp, MII_BMCR, &bmcr);
5018                 if (bmcr & BMCR_ANENABLE) {
5019                         u32 phy1, phy2;
5020
5021                         /* Select shadow register 0x1f */
5022                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5023                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5024
5025                         /* Select expansion interrupt status register */
5026                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5027                                          MII_TG3_DSP_EXP1_INT_STAT);
5028                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5029                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5030
5031                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5032                                 /* We have signal detect and are not receiving
5033                                  * config code words, so the link is up by
5034                                  * parallel detection.
5035                                  */
5036
5037                                 bmcr &= ~BMCR_ANENABLE;
5038                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5039                                 tg3_writephy(tp, MII_BMCR, bmcr);
5040                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5041                         }
5042                 }
5043         } else if (netif_carrier_ok(tp->dev) &&
5044                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5045                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5046                 u32 phy2;
5047
5048                 /* Select expansion interrupt status register */
5049                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5050                                  MII_TG3_DSP_EXP1_INT_STAT);
5051                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5052                 if (phy2 & 0x20) {
5053                         u32 bmcr;
5054
5055                         /* Config code words received, turn on autoneg. */
5056                         tg3_readphy(tp, MII_BMCR, &bmcr);
5057                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5058
5059                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5060
5061                 }
5062         }
5063 }
5064
5065 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5066 {
5067         u32 val;
5068         int err;
5069
5070         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5071                 err = tg3_setup_fiber_phy(tp, force_reset);
5072         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5073                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5074         else
5075                 err = tg3_setup_copper_phy(tp, force_reset);
5076
5077         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5078                 u32 scale;
5079
5080                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5081                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5082                         scale = 65;
5083                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5084                         scale = 6;
5085                 else
5086                         scale = 12;
5087
5088                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5089                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5090                 tw32(GRC_MISC_CFG, val);
5091         }
5092
5093         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5094               (6 << TX_LENGTHS_IPG_SHIFT);
5095         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5096                 val |= tr32(MAC_TX_LENGTHS) &
5097                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
5098                         TX_LENGTHS_CNT_DWN_VAL_MSK);
5099
5100         if (tp->link_config.active_speed == SPEED_1000 &&
5101             tp->link_config.active_duplex == DUPLEX_HALF)
5102                 tw32(MAC_TX_LENGTHS, val |
5103                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5104         else
5105                 tw32(MAC_TX_LENGTHS, val |
5106                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5107
5108         if (!tg3_flag(tp, 5705_PLUS)) {
5109                 if (netif_carrier_ok(tp->dev)) {
5110                         tw32(HOSTCC_STAT_COAL_TICKS,
5111                              tp->coal.stats_block_coalesce_usecs);
5112                 } else {
5113                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
5114                 }
5115         }
5116
5117         if (tg3_flag(tp, ASPM_WORKAROUND)) {
5118                 val = tr32(PCIE_PWR_MGMT_THRESH);
5119                 if (!netif_carrier_ok(tp->dev))
5120                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5121                               tp->pwrmgmt_thresh;
5122                 else
5123                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5124                 tw32(PCIE_PWR_MGMT_THRESH, val);
5125         }
5126
5127         return err;
5128 }
5129
5130 static inline int tg3_irq_sync(struct tg3 *tp)
5131 {
5132         return tp->irq_sync;
5133 }
5134
5135 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5136 {
5137         int i;
5138
5139         dst = (u32 *)((u8 *)dst + off);
5140         for (i = 0; i < len; i += sizeof(u32))
5141                 *dst++ = tr32(off + i);
5142 }
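
/* Note the pointer adjustment in tg3_rd32_loop() above: dst is offset
 * by the register offset itself, so after the calls below the dump
 * buffer satisfies regs[off / sizeof(u32)] == register at offset off.
 * tg3_dump_state() relies on this when it prints "i * 4" as the
 * register address.
 */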
5143
5144 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5145 {
5146         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5147         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5148         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5149         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5150         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5151         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5152         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5153         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5154         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5155         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5156         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5157         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5158         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5159         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5160         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5161         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5162         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5163         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5164         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5165
5166         if (tg3_flag(tp, SUPPORT_MSIX))
5167                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5168
5169         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5170         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5171         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5172         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5173         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5174         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5175         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5176         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5177
5178         if (!tg3_flag(tp, 5705_PLUS)) {
5179                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5180                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5181                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5182         }
5183
5184         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5185         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5186         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5187         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5188         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5189
5190         if (tg3_flag(tp, NVRAM))
5191                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5192 }
5193
5194 static void tg3_dump_state(struct tg3 *tp)
5195 {
5196         int i;
5197         u32 *regs;
5198
5199         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5200         if (!regs) {
5201                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5202                 return;
5203         }
5204
5205         if (tg3_flag(tp, PCI_EXPRESS)) {
5206                 /* Read up to but not including private PCI registers */
5207                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5208                         regs[i / sizeof(u32)] = tr32(i);
5209         } else
5210                 tg3_dump_legacy_regs(tp, regs);
5211
5212         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5213                 if (!regs[i + 0] && !regs[i + 1] &&
5214                     !regs[i + 2] && !regs[i + 3])
5215                         continue;
5216
5217                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5218                            i * 4,
5219                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5220         }
5221
5222         kfree(regs);
5223
5224         for (i = 0; i < tp->irq_cnt; i++) {
5225                 struct tg3_napi *tnapi = &tp->napi[i];
5226
5227                 /* SW status block */
5228                 netdev_err(tp->dev,
5229                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5230                            i,
5231                            tnapi->hw_status->status,
5232                            tnapi->hw_status->status_tag,
5233                            tnapi->hw_status->rx_jumbo_consumer,
5234                            tnapi->hw_status->rx_consumer,
5235                            tnapi->hw_status->rx_mini_consumer,
5236                            tnapi->hw_status->idx[0].rx_producer,
5237                            tnapi->hw_status->idx[0].tx_consumer);
5238
5239                 netdev_err(tp->dev,
5240                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5241                            i,
5242                            tnapi->last_tag, tnapi->last_irq_tag,
5243                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5244                            tnapi->rx_rcb_ptr,
5245                            tnapi->prodring.rx_std_prod_idx,
5246                            tnapi->prodring.rx_std_cons_idx,
5247                            tnapi->prodring.rx_jmb_prod_idx,
5248                            tnapi->prodring.rx_jmb_cons_idx);
5249         }
5250 }
5251
5252 /* This is called whenever we suspect that the system chipset is re-
5253  * ordering the sequence of MMIO to the tx send mailbox. The symptom
5254  * is bogus tx completions. We try to recover by setting the
5255  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5256  * in the workqueue.
5257  */
5258 static void tg3_tx_recover(struct tg3 *tp)
5259 {
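        /* Recovery only makes sense if the MBOX_WRITE_REORDER workaround is
         * not already active and mailbox writes are not already indirect;
         * otherwise the bogus completions must have some other cause.
         */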
5260         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5261                tp->write32_tx_mbox == tg3_write_indirect_mbox);
5262
5263         netdev_warn(tp->dev,
5264                     "The system may be re-ordering memory-mapped I/O "
5265                     "cycles to the network device, attempting to recover. "
5266                     "Please report the problem to the driver maintainer "
5267                     "and include system chipset information.\n");
5268
5269         spin_lock(&tp->lock);
5270         tg3_flag_set(tp, TX_RECOVERY_PENDING);
5271         spin_unlock(&tp->lock);
5272 }
5273
5274 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5275 {
5276         /* Tell compiler to fetch tx indices from memory. */
5277         barrier();
5278         return tnapi->tx_pending -
5279                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5280 }
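
/* A worked example of the computation above (illustrative values, not
 * driver state): with a 512-entry ring, tx_pending = 511, tx_prod = 5
 * and tx_cons = 510, the producer has wrapped and (5 - 510) & 511 = 7
 * descriptors are in flight, leaving 504 free.  The mask is only
 * correct because the ring size is a power of two:
 */
static inline u32 tg3_tx_avail_example(u32 pending, u32 prod, u32 cons,
                                       u32 size)
{
        /* size must be a power of two for the mask to handle wrap */
        return pending - ((prod - cons) & (size - 1));
}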
5281
5282 /* Tigon3 never reports partial packet sends.  So we do not
5283  * need special logic to handle SKBs that have not had all
5284  * of their frags sent yet, like SunGEM does.
5285  */
5286 static void tg3_tx(struct tg3_napi *tnapi)
5287 {
5288         struct tg3 *tp = tnapi->tp;
5289         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5290         u32 sw_idx = tnapi->tx_cons;
5291         struct netdev_queue *txq;
5292         int index = tnapi - tp->napi;
5293         unsigned int pkts_compl = 0, bytes_compl = 0;
5294
5295         if (tg3_flag(tp, ENABLE_TSS))
5296                 index--;
5297
5298         txq = netdev_get_tx_queue(tp->dev, index);
5299
5300         while (sw_idx != hw_idx) {
5301                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5302                 struct sk_buff *skb = ri->skb;
5303                 int i, tx_bug = 0;
5304
5305                 if (unlikely(skb == NULL)) {
5306                         tg3_tx_recover(tp);
5307                         return;
5308                 }
5309
5310                 pci_unmap_single(tp->pdev,
5311                                  dma_unmap_addr(ri, mapping),
5312                                  skb_headlen(skb),
5313                                  PCI_DMA_TODEVICE);
5314
5315                 ri->skb = NULL;
5316
5317                 while (ri->fragmented) {
5318                         ri->fragmented = false;
5319                         sw_idx = NEXT_TX(sw_idx);
5320                         ri = &tnapi->tx_buffers[sw_idx];
5321                 }
5322
5323                 sw_idx = NEXT_TX(sw_idx);
5324
5325                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5326                         ri = &tnapi->tx_buffers[sw_idx];
5327                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5328                                 tx_bug = 1;
5329
5330                         pci_unmap_page(tp->pdev,
5331                                        dma_unmap_addr(ri, mapping),
5332                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
5333                                        PCI_DMA_TODEVICE);
5334
5335                         while (ri->fragmented) {
5336                                 ri->fragmented = false;
5337                                 sw_idx = NEXT_TX(sw_idx);
5338                                 ri = &tnapi->tx_buffers[sw_idx];
5339                         }
5340
5341                         sw_idx = NEXT_TX(sw_idx);
5342                 }
5343
5344                 pkts_compl++;
5345                 bytes_compl += skb->len;
5346
5347                 dev_kfree_skb(skb);
5348
5349                 if (unlikely(tx_bug)) {
5350                         tg3_tx_recover(tp);
5351                         return;
5352                 }
5353         }
5354
5355         netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);
5356
5357         tnapi->tx_cons = sw_idx;
5358
5359         /* Need to make the tx_cons update visible to tg3_start_xmit()
5360          * before checking for netif_queue_stopped().  Without the
5361          * memory barrier, there is a small possibility that tg3_start_xmit()
5362          * will miss it and cause the queue to be stopped forever.
5363          */
5364         smp_mb();
5365
5366         if (unlikely(netif_tx_queue_stopped(txq) &&
5367                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5368                 __netif_tx_lock(txq, smp_processor_id());
5369                 if (netif_tx_queue_stopped(txq) &&
5370                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5371                         netif_tx_wake_queue(txq);
5372                 __netif_tx_unlock(txq);
5373         }
5374 }
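
/* Schematically, the smp_mb() in tg3_tx() above pairs with a matching
 * barrier on the transmit side (a sketch of the protocol, not the
 * exact tg3_start_xmit() code):
 *
 *      producer (start_xmit)             consumer (tg3_tx)
 *      ---------------------             -----------------
 *      stop queue when ring fills        tnapi->tx_cons = sw_idx;
 *      smp_mb();                         smp_mb();
 *      re-check avail, wake queue        if queue stopped and avail
 *      if space appeared                 above threshold, wake queue
 *
 * Without both barriers one side can miss the other's update and the
 * queue can stall forever, as the comment in tg3_tx() explains.
 */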
5375
5376 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5377 {
5378         if (!ri->data)
5379                 return;
5380
5381         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5382                          map_sz, PCI_DMA_FROMDEVICE);
5383         kfree(ri->data);
5384         ri->data = NULL;
5385 }
5386
5387 /* Returns size of the rx data buffer allocated, or < 0 on error.
5388  *
5389  * We only need to fill in the address because the other members
5390  * of the RX descriptor are invariant, see tg3_init_rings.
5391  *
5392  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
5393  * posting buffers we only dirty the first cache line of the RX
5394  * descriptor (containing the address).  Whereas for the RX status
5395  * buffers the cpu only reads the last cacheline of the RX descriptor
5396  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5397  */
5398 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5399                             u32 opaque_key, u32 dest_idx_unmasked)
5400 {
5401         struct tg3_rx_buffer_desc *desc;
5402         struct ring_info *map;
5403         u8 *data;
5404         dma_addr_t mapping;
5405         int skb_size, data_size, dest_idx;
5406
5407         switch (opaque_key) {
5408         case RXD_OPAQUE_RING_STD:
5409                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5410                 desc = &tpr->rx_std[dest_idx];
5411                 map = &tpr->rx_std_buffers[dest_idx];
5412                 data_size = tp->rx_pkt_map_sz;
5413                 break;
5414
5415         case RXD_OPAQUE_RING_JUMBO:
5416                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5417                 desc = &tpr->rx_jmb[dest_idx].std;
5418                 map = &tpr->rx_jmb_buffers[dest_idx];
5419                 data_size = TG3_RX_JMB_MAP_SZ;
5420                 break;
5421
5422         default:
5423                 return -EINVAL;
5424         }
5425
5426         /* Do not overwrite any of the map or rp information
5427          * until we are sure we can commit to a new buffer.
5428          *
5429          * Callers depend upon this behavior and assume that
5430          * we leave everything unchanged if we fail.
5431          */
5432         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
5433                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5434         data = kmalloc(skb_size, GFP_ATOMIC);
5435         if (!data)
5436                 return -ENOMEM;
5437
5438         mapping = pci_map_single(tp->pdev,
5439                                  data + TG3_RX_OFFSET(tp),
5440                                  data_size,
5441                                  PCI_DMA_FROMDEVICE);
5442         if (pci_dma_mapping_error(tp->pdev, mapping)) {
5443                 kfree(data);
5444                 return -EIO;
5445         }
5446
5447         map->data = data;
5448         dma_unmap_addr_set(map, mapping, mapping);
5449
5450         desc->addr_hi = ((u64)mapping >> 32);
5451         desc->addr_lo = ((u64)mapping & 0xffffffff);
5452
5453         return data_size;
5454 }
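
/* A sketch of the address split performed above (illustrative only,
 * not used by the driver): a 64-bit bus address such as
 * 0x0000000123456000 lands in the descriptor as addr_hi = 0x00000001
 * and addr_lo = 0x23456000, the RX BD carrying the DMA address as two
 * 32-bit halves.
 */
static inline void tg3_split_dma_addr_example(u64 mapping,
                                              u32 *hi, u32 *lo)
{
        *hi = (u32)(mapping >> 32);
        *lo = (u32)(mapping & 0xffffffff);
}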
5455
5456 /* We only need to move over in the address because the other
5457  * members of the RX descriptor are invariant.  See notes above
5458  * tg3_alloc_rx_data for full details.
5459  */
5460 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5461                            struct tg3_rx_prodring_set *dpr,
5462                            u32 opaque_key, int src_idx,
5463                            u32 dest_idx_unmasked)
5464 {
5465         struct tg3 *tp = tnapi->tp;
5466         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5467         struct ring_info *src_map, *dest_map;
5468         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5469         int dest_idx;
5470
5471         switch (opaque_key) {
5472         case RXD_OPAQUE_RING_STD:
5473                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5474                 dest_desc = &dpr->rx_std[dest_idx];
5475                 dest_map = &dpr->rx_std_buffers[dest_idx];
5476                 src_desc = &spr->rx_std[src_idx];
5477                 src_map = &spr->rx_std_buffers[src_idx];
5478                 break;
5479
5480         case RXD_OPAQUE_RING_JUMBO:
5481                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5482                 dest_desc = &dpr->rx_jmb[dest_idx].std;
5483                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5484                 src_desc = &spr->rx_jmb[src_idx].std;
5485                 src_map = &spr->rx_jmb_buffers[src_idx];
5486                 break;
5487
5488         default:
5489                 return;
5490         }
5491
5492         dest_map->data = src_map->data;
5493         dma_unmap_addr_set(dest_map, mapping,
5494                            dma_unmap_addr(src_map, mapping));
5495         dest_desc->addr_hi = src_desc->addr_hi;
5496         dest_desc->addr_lo = src_desc->addr_lo;
5497
5498         /* Ensure that the update to the data pointer happens after the
5499          * physical addresses have been transferred to the new BD location.
5500          */
5501         smp_wmb();
5502
5503         src_map->data = NULL;
5504 }
5505
5506 /* The RX ring scheme is composed of multiple rings which post fresh
5507  * buffers to the chip, and one special ring the chip uses to report
5508  * status back to the host.
5509  *
5510  * The special ring reports the status of received packets to the
5511  * host.  The chip does not write into the original descriptor the
5512  * RX buffer was obtained from.  The chip simply takes the original
5513  * descriptor as provided by the host, updates the status and length
5514  * field, then writes this into the next status ring entry.
5515  *
5516  * Each ring the host uses to post buffers to the chip is described
5517  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
5518  * it is first placed into the on-chip ram.  When the packet's length
5519  * is known, it walks down the TG3_BDINFO entries to select the ring.
5520  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
5521  * which is within the range of the new packet's length is chosen.
5522  *
5523  * The "separate ring for rx status" scheme may sound queer, but it makes
5524  * sense from a cache coherency perspective.  If only the host writes
5525  * to the buffer post rings, and only the chip writes to the rx status
5526  * rings, then cache lines never move beyond shared-modified state.
5527  * If both the host and chip were to write into the same ring, cache line
5528  * eviction could occur since both entities want it in an exclusive state.
5529  */
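
/* Schematically (a sketch of the scheme described above):
 *
 *      host ---- std/jumbo producer rings --->  chip   (fresh buffers)
 *      host <--- rx status (return) ring ------ chip   (status + length)
 *
 * Each ring has exactly one writer, so a ring entry's cache line moves
 * between at most one writer and one reader.
 */
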
5530 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5531 {
5532         struct tg3 *tp = tnapi->tp;
5533         u32 work_mask, rx_std_posted = 0;
5534         u32 std_prod_idx, jmb_prod_idx;
5535         u32 sw_idx = tnapi->rx_rcb_ptr;
5536         u16 hw_idx;
5537         int received;
5538         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5539
5540         hw_idx = *(tnapi->rx_rcb_prod_idx);
5541         /*
5542          * We need to order the read of hw_idx and the read of
5543          * the opaque cookie.
5544          */
5545         rmb();
5546         work_mask = 0;
5547         received = 0;
5548         std_prod_idx = tpr->rx_std_prod_idx;
5549         jmb_prod_idx = tpr->rx_jmb_prod_idx;
5550         while (sw_idx != hw_idx && budget > 0) {
5551                 struct ring_info *ri;
5552                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5553                 unsigned int len;
5554                 struct sk_buff *skb;
5555                 dma_addr_t dma_addr;
5556                 u32 opaque_key, desc_idx, *post_ptr;
5557                 u8 *data;
5558
5559                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5560                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5561                 if (opaque_key == RXD_OPAQUE_RING_STD) {
5562                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5563                         dma_addr = dma_unmap_addr(ri, mapping);
5564                         data = ri->data;
5565                         post_ptr = &std_prod_idx;
5566                         rx_std_posted++;
5567                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5568                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5569                         dma_addr = dma_unmap_addr(ri, mapping);
5570                         data = ri->data;
5571                         post_ptr = &jmb_prod_idx;
5572                 } else
5573                         goto next_pkt_nopost;
5574
5575                 work_mask |= opaque_key;
5576
5577                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5578                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5579                 drop_it:
5580                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5581                                        desc_idx, *post_ptr);
5582                 drop_it_no_recycle:
5583                         /* Other statistics are kept track of by the card. */
5584                         tp->rx_dropped++;
5585                         goto next_pkt;
5586                 }
5587
5588                 prefetch(data + TG3_RX_OFFSET(tp));
5589                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5590                       ETH_FCS_LEN;
5591
5592                 if (len > TG3_RX_COPY_THRESH(tp)) {
5593                         int skb_size;
5594
5595                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
5596                                                     *post_ptr);
5597                         if (skb_size < 0)
5598                                 goto drop_it;
5599
5600                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
5601                                          PCI_DMA_FROMDEVICE);
5602
5603                         skb = build_skb(data);
5604                         if (!skb) {
5605                                 kfree(data);
5606                                 goto drop_it_no_recycle;
5607                         }
5608                         skb_reserve(skb, TG3_RX_OFFSET(tp));
5609                         /* Ensure that the update to the data happens
5610                          * after the usage of the old DMA mapping.
5611                          */
5612                         smp_wmb();
5613
5614                         ri->data = NULL;
5615
5616                 } else {
5617                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5618                                        desc_idx, *post_ptr);
5619
5620                         skb = netdev_alloc_skb(tp->dev,
5621                                                len + TG3_RAW_IP_ALIGN);
5622                         if (skb == NULL)
5623                                 goto drop_it_no_recycle;
5624
5625                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
5626                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5627                         memcpy(skb->data,
5628                                data + TG3_RX_OFFSET(tp),
5629                                len);
5630                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5631                 }
5632
5633                 skb_put(skb, len);
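                /* The chip reports a 16-bit ones-complement checksum in the
                 * descriptor; 0xffff appears to mean the TCP/UDP checksum
                 * verified clean, so the stack can skip re-checking it.
                 */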
5634                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5635                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5636                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5637                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
5638                         skb->ip_summed = CHECKSUM_UNNECESSARY;
5639                 else
5640                         skb_checksum_none_assert(skb);
5641
5642                 skb->protocol = eth_type_trans(skb, tp->dev);
5643
5644                 if (len > (tp->dev->mtu + ETH_HLEN) &&
5645                     skb->protocol != htons(ETH_P_8021Q)) {
5646                         dev_kfree_skb(skb);
5647                         goto drop_it_no_recycle;
5648                 }
5649
5650                 if (desc->type_flags & RXD_FLAG_VLAN &&
5651                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5652                         __vlan_hwaccel_put_tag(skb,
5653                                                desc->err_vlan & RXD_VLAN_MASK);
5654
5655                 napi_gro_receive(&tnapi->napi, skb);
5656
5657                 received++;
5658                 budget--;
5659
5660 next_pkt:
5661                 (*post_ptr)++;
5662
5663                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5664                         tpr->rx_std_prod_idx = std_prod_idx &
5665                                                tp->rx_std_ring_mask;
5666                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5667                                      tpr->rx_std_prod_idx);
5668                         work_mask &= ~RXD_OPAQUE_RING_STD;
5669                         rx_std_posted = 0;
5670                 }
5671 next_pkt_nopost:
5672                 sw_idx++;
5673                 sw_idx &= tp->rx_ret_ring_mask;
5674
5675                 /* Refresh hw_idx to see if there is new work */
5676                 if (sw_idx == hw_idx) {
5677                         hw_idx = *(tnapi->rx_rcb_prod_idx);
5678                         rmb();
5679                 }
5680         }
5681
5682         /* ACK the status ring. */
5683         tnapi->rx_rcb_ptr = sw_idx;
5684         tw32_rx_mbox(tnapi->consmbox, sw_idx);
5685
5686         /* Refill RX ring(s). */
5687         if (!tg3_flag(tp, ENABLE_RSS)) {
5688                 if (work_mask & RXD_OPAQUE_RING_STD) {
5689                         tpr->rx_std_prod_idx = std_prod_idx &
5690                                                tp->rx_std_ring_mask;
5691                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5692                                      tpr->rx_std_prod_idx);
5693                 }
5694                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5695                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
5696                                                tp->rx_jmb_ring_mask;
5697                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5698                                      tpr->rx_jmb_prod_idx);
5699                 }
5700                 mmiowb();
5701         } else if (work_mask) {
5702                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5703                  * updated before the producer indices can be updated.
5704                  */
5705                 smp_wmb();
5706
5707                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5708                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5709
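                /* Under RSS only vector 1's poller writes the producer
                 * mailboxes (see tg3_poll_work()); publish the new indices
                 * and make sure napi[1] runs to push them to the chip.
                 */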
5710                 if (tnapi != &tp->napi[1])
5711                         napi_schedule(&tp->napi[1].napi);
5712         }
5713
5714         return received;
5715 }
5716
5717 static void tg3_poll_link(struct tg3 *tp)
5718 {
5719         /* handle link change and other phy events */
5720         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5721                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5722
5723                 if (sblk->status & SD_STATUS_LINK_CHG) {
5724                         sblk->status = SD_STATUS_UPDATED |
5725                                        (sblk->status & ~SD_STATUS_LINK_CHG);
5726                         spin_lock(&tp->lock);
5727                         if (tg3_flag(tp, USE_PHYLIB)) {
5728                                 tw32_f(MAC_STATUS,
5729                                      (MAC_STATUS_SYNC_CHANGED |
5730                                       MAC_STATUS_CFG_CHANGED |
5731                                       MAC_STATUS_MI_COMPLETION |
5732                                       MAC_STATUS_LNKSTATE_CHANGED));
5733                                 udelay(40);
5734                         } else
5735                                 tg3_setup_phy(tp, 0);
5736                         spin_unlock(&tp->lock);
5737                 }
5738         }
5739 }
5740
5741 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5742                                 struct tg3_rx_prodring_set *dpr,
5743                                 struct tg3_rx_prodring_set *spr)
5744 {
5745         u32 si, di, cpycnt, src_prod_idx;
5746         int i, err = 0;
5747
5748         while (1) {
5749                 src_prod_idx = spr->rx_std_prod_idx;
5750
5751                 /* Make sure updates to the rx_std_buffers[] entries and the
5752                  * standard producer index are seen in the correct order.
5753                  */
5754                 smp_rmb();
5755
5756                 if (spr->rx_std_cons_idx == src_prod_idx)
5757                         break;
5758
5759                 if (spr->rx_std_cons_idx < src_prod_idx)
5760                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5761                 else
5762                         cpycnt = tp->rx_std_ring_mask + 1 -
5763                                  spr->rx_std_cons_idx;
5764
5765                 cpycnt = min(cpycnt,
5766                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
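                /* cpycnt is now the longest contiguous run we can move:
                 * the source run up to its wrap point, capped by the free
                 * space in front of the destination producer.  Any
                 * remainder is picked up on the next loop iteration.
                 */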
5767
5768                 si = spr->rx_std_cons_idx;
5769                 di = dpr->rx_std_prod_idx;
5770
5771                 for (i = di; i < di + cpycnt; i++) {
5772                         if (dpr->rx_std_buffers[i].data) {
5773                                 cpycnt = i - di;
5774                                 err = -ENOSPC;
5775                                 break;
5776                         }
5777                 }
5778
5779                 if (!cpycnt)
5780                         break;
5781
5782                 /* Ensure that updates to the rx_std_buffers ring and the
5783                  * shadowed hardware producer ring from tg3_recycle_skb() are
5784                  * ordered correctly WRT the skb check above.
5785                  */
5786                 smp_rmb();
5787
5788                 memcpy(&dpr->rx_std_buffers[di],
5789                        &spr->rx_std_buffers[si],
5790                        cpycnt * sizeof(struct ring_info));
5791
5792                 for (i = 0; i < cpycnt; i++, di++, si++) {
5793                         struct tg3_rx_buffer_desc *sbd, *dbd;
5794                         sbd = &spr->rx_std[si];
5795                         dbd = &dpr->rx_std[di];
5796                         dbd->addr_hi = sbd->addr_hi;
5797                         dbd->addr_lo = sbd->addr_lo;
5798                 }
5799
5800                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5801                                        tp->rx_std_ring_mask;
5802                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5803                                        tp->rx_std_ring_mask;
5804         }
5805
5806         while (1) {
5807                 src_prod_idx = spr->rx_jmb_prod_idx;
5808
5809                 /* Make sure updates to the rx_jmb_buffers[] entries and
5810                  * the jumbo producer index are seen in the correct order.
5811                  */
5812                 smp_rmb();
5813
5814                 if (spr->rx_jmb_cons_idx == src_prod_idx)
5815                         break;
5816
5817                 if (spr->rx_jmb_cons_idx < src_prod_idx)
5818                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5819                 else
5820                         cpycnt = tp->rx_jmb_ring_mask + 1 -
5821                                  spr->rx_jmb_cons_idx;
5822
5823                 cpycnt = min(cpycnt,
5824                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5825
5826                 si = spr->rx_jmb_cons_idx;
5827                 di = dpr->rx_jmb_prod_idx;
5828
5829                 for (i = di; i < di + cpycnt; i++) {
5830                         if (dpr->rx_jmb_buffers[i].data) {
5831                                 cpycnt = i - di;
5832                                 err = -ENOSPC;
5833                                 break;
5834                         }
5835                 }
5836
5837                 if (!cpycnt)
5838                         break;
5839
5840                 /* Ensure that updates to the rx_jmb_buffers ring and the
5841                  * shadowed hardware producer ring from tg3_recycle_skb() are
5842                  * ordered correctly WRT the skb check above.
5843                  */
5844                 smp_rmb();
5845
5846                 memcpy(&dpr->rx_jmb_buffers[di],
5847                        &spr->rx_jmb_buffers[si],
5848                        cpycnt * sizeof(struct ring_info));
5849
5850                 for (i = 0; i < cpycnt; i++, di++, si++) {
5851                         struct tg3_rx_buffer_desc *sbd, *dbd;
5852                         sbd = &spr->rx_jmb[si].std;
5853                         dbd = &dpr->rx_jmb[di].std;
5854                         dbd->addr_hi = sbd->addr_hi;
5855                         dbd->addr_lo = sbd->addr_lo;
5856                 }
5857
5858                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5859                                        tp->rx_jmb_ring_mask;
5860                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5861                                        tp->rx_jmb_ring_mask;
5862         }
5863
5864         return err;
5865 }
5866
5867 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5868 {
5869         struct tg3 *tp = tnapi->tp;
5870
5871         /* run TX completion thread */
5872         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5873                 tg3_tx(tnapi);
5874                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5875                         return work_done;
5876         }
5877
5878         /* run RX thread, within the bounds set by NAPI.
5879          * All RX "locking" is done by ensuring outside
5880          * code synchronizes with tg3->napi.poll()
5881          */
5882         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5883                 work_done += tg3_rx(tnapi, budget - work_done);
5884
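        /* Vector 1 doubles as the buffer-return agent for all RSS vectors:
         * it drains each per-vector producer ring into vector 0's ring and
         * then pushes the combined producer indices to the chip.
         */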
5885         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5886                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5887                 int i, err = 0;
5888                 u32 std_prod_idx = dpr->rx_std_prod_idx;
5889                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5890
5891                 for (i = 1; i < tp->irq_cnt; i++)
5892                         err |= tg3_rx_prodring_xfer(tp, dpr,
5893                                                     &tp->napi[i].prodring);
5894
5895                 wmb();
5896
5897                 if (std_prod_idx != dpr->rx_std_prod_idx)
5898                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5899                                      dpr->rx_std_prod_idx);
5900
5901                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5902                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5903                                      dpr->rx_jmb_prod_idx);
5904
5905                 mmiowb();
5906
5907                 if (err)
5908                         tw32_f(HOSTCC_MODE, tp->coal_now);
5909         }
5910
5911         return work_done;
5912 }
5913
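/* Schedule the reset task at most once: test_and_set_bit() makes the
 * check-and-set atomic, so concurrent callers cannot queue the work twice.
 */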
5914 static inline void tg3_reset_task_schedule(struct tg3 *tp)
5915 {
5916         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
5917                 schedule_work(&tp->reset_task);
5918 }
5919
5920 static inline void tg3_reset_task_cancel(struct tg3 *tp)
5921 {
5922         cancel_work_sync(&tp->reset_task);
5923         tg3_flag_clear(tp, RESET_TASK_PENDING);
5924 }
5925
5926 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5927 {
5928         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5929         struct tg3 *tp = tnapi->tp;
5930         int work_done = 0;
5931         struct tg3_hw_status *sblk = tnapi->hw_status;
5932
5933         while (1) {
5934                 work_done = tg3_poll_work(tnapi, work_done, budget);
5935
5936                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5937                         goto tx_recovery;
5938
5939                 if (unlikely(work_done >= budget))
5940                         break;
5941
5942                 /* tp->last_tag is used in tg3_int_reenable() below
5943                  * to tell the hw how much work has been processed,
5944                  * so we must read it before checking for more work.
5945                  */
5946                 tnapi->last_tag = sblk->status_tag;
5947                 tnapi->last_irq_tag = tnapi->last_tag;
5948                 rmb();
5949
5950                 /* check for RX/TX work to do */
5951                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5952                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5953                         napi_complete(napi);
5954                         /* Reenable interrupts. */
5955                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5956                         mmiowb();
5957                         break;
5958                 }
5959         }
5960
5961         return work_done;
5962
5963 tx_recovery:
5964         /* work_done is guaranteed to be less than budget. */
5965         napi_complete(napi);
5966         tg3_reset_task_schedule(tp);
5967         return work_done;
5968 }
5969
5970 static void tg3_process_error(struct tg3 *tp)
5971 {
5972         u32 val;
5973         bool real_error = false;
5974
5975         if (tg3_flag(tp, ERROR_PROCESSED))
5976                 return;
5977
5978         /* Check Flow Attention register */
5979         val = tr32(HOSTCC_FLOW_ATTN);
5980         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5981                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
5982                 real_error = true;
5983         }
5984
5985         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5986                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
5987                 real_error = true;
5988         }
5989
5990         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5991                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
5992                 real_error = true;
5993         }
5994
5995         if (!real_error)
5996                 return;
5997
5998         tg3_dump_state(tp);
5999
6000         tg3_flag_set(tp, ERROR_PROCESSED);
6001         tg3_reset_task_schedule(tp);
6002 }
6003
6004 static int tg3_poll(struct napi_struct *napi, int budget)
6005 {
6006         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6007         struct tg3 *tp = tnapi->tp;
6008         int work_done = 0;
6009         struct tg3_hw_status *sblk = tnapi->hw_status;
6010
6011         while (1) {
6012                 if (sblk->status & SD_STATUS_ERROR)
6013                         tg3_process_error(tp);
6014
6015                 tg3_poll_link(tp);
6016
6017                 work_done = tg3_poll_work(tnapi, work_done, budget);
6018
6019                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6020                         goto tx_recovery;
6021
6022                 if (unlikely(work_done >= budget))
6023                         break;
6024
6025                 if (tg3_flag(tp, TAGGED_STATUS)) {
6026                         /* tp->last_tag is used in tg3_int_reenable() below
6027                          * to tell the hw how much work has been processed,
6028                          * so we must read it before checking for more work.
6029                          */
6030                         tnapi->last_tag = sblk->status_tag;
6031                         tnapi->last_irq_tag = tnapi->last_tag;
6032                         rmb();
6033                 } else
6034                         sblk->status &= ~SD_STATUS_UPDATED;
6035
6036                 if (likely(!tg3_has_work(tnapi))) {
6037                         napi_complete(napi);
6038                         tg3_int_reenable(tnapi);
6039                         break;
6040                 }
6041         }
6042
6043         return work_done;
6044
6045 tx_recovery:
6046         /* work_done is guaranteed to be less than budget. */
6047         napi_complete(napi);
6048         tg3_reset_task_schedule(tp);
6049         return work_done;
6050 }
6051
6052 static void tg3_napi_disable(struct tg3 *tp)
6053 {
6054         int i;
6055
6056         for (i = tp->irq_cnt - 1; i >= 0; i--)
6057                 napi_disable(&tp->napi[i].napi);
6058 }
6059
6060 static void tg3_napi_enable(struct tg3 *tp)
6061 {
6062         int i;
6063
6064         for (i = 0; i < tp->irq_cnt; i++)
6065                 napi_enable(&tp->napi[i].napi);
6066 }
6067
6068 static void tg3_napi_init(struct tg3 *tp)
6069 {
6070         int i;
6071
6072         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6073         for (i = 1; i < tp->irq_cnt; i++)
6074                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6075 }
6076
6077 static void tg3_napi_fini(struct tg3 *tp)
6078 {
6079         int i;
6080
6081         for (i = 0; i < tp->irq_cnt; i++)
6082                 netif_napi_del(&tp->napi[i].napi);
6083 }
6084
6085 static inline void tg3_netif_stop(struct tg3 *tp)
6086 {
6087         tp->dev->trans_start = jiffies; /* prevent tx timeout */
6088         tg3_napi_disable(tp);
6089         netif_tx_disable(tp->dev);
6090 }
6091
6092 static inline void tg3_netif_start(struct tg3 *tp)
6093 {
6094         /* NOTE: unconditional netif_tx_wake_all_queues is only
6095          * appropriate so long as all callers are assured to
6096          * have free tx slots (such as after tg3_init_hw)
6097          */
6098         netif_tx_wake_all_queues(tp->dev);
6099
6100         tg3_napi_enable(tp);
6101         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6102         tg3_enable_ints(tp);
6103 }
6104
6105 static void tg3_irq_quiesce(struct tg3 *tp)
6106 {
6107         int i;
6108
6109         BUG_ON(tp->irq_sync);
6110
6111         tp->irq_sync = 1;
6112         smp_mb();
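        /* Make the irq_sync store visible before waiting below; handlers
         * that observe it via tg3_irq_sync() will not schedule NAPI.
         */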
6113
6114         for (i = 0; i < tp->irq_cnt; i++)
6115                 synchronize_irq(tp->napi[i].irq_vec);
6116 }
6117
6118 /* Fully shut down all tg3 driver activity elsewhere in the system.
6119  * If irq_sync is non-zero, the IRQ handlers must be synchronized with
6120  * as well.  Most of the time this is only necessary when shutting
6121  * down the device.
6122  */
6123 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6124 {
6125         spin_lock_bh(&tp->lock);
6126         if (irq_sync)
6127                 tg3_irq_quiesce(tp);
6128 }
6129
6130 static inline void tg3_full_unlock(struct tg3 *tp)
6131 {
6132         spin_unlock_bh(&tp->lock);
6133 }
6134
6135 /* One-shot MSI handler - Chip automatically disables interrupt
6136  * after sending the MSI, so the driver doesn't have to.
6137  */
6138 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6139 {
6140         struct tg3_napi *tnapi = dev_id;
6141         struct tg3 *tp = tnapi->tp;
6142
6143         prefetch(tnapi->hw_status);
6144         if (tnapi->rx_rcb)
6145                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6146
6147         if (likely(!tg3_irq_sync(tp)))
6148                 napi_schedule(&tnapi->napi);
6149
6150         return IRQ_HANDLED;
6151 }
6152
6153 /* MSI ISR - No need to check for interrupt sharing and no need to
6154  * flush the status block and interrupt mailbox.  PCI ordering rules
6155  * guarantee that MSI will arrive after the status block.
6156  */
6157 static irqreturn_t tg3_msi(int irq, void *dev_id)
6158 {
6159         struct tg3_napi *tnapi = dev_id;
6160         struct tg3 *tp = tnapi->tp;
6161
6162         prefetch(tnapi->hw_status);
6163         if (tnapi->rx_rcb)
6164                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6165         /*
6166          * Writing any value to intr-mbox-0 clears PCI INTA# and
6167          * chip-internal interrupt pending events.
6168          * Writing non-zero to intr-mbox-0 additionally tells the
6169          * NIC to stop sending us irqs, engaging "in-intr-handler"
6170          * event coalescing.
6171          */
6172         tw32_mailbox(tnapi->int_mbox, 0x00000001);
6173         if (likely(!tg3_irq_sync(tp)))
6174                 napi_schedule(&tnapi->napi);
6175
6176         return IRQ_RETVAL(1);
6177 }
6178
6179 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6180 {
6181         struct tg3_napi *tnapi = dev_id;
6182         struct tg3 *tp = tnapi->tp;
6183         struct tg3_hw_status *sblk = tnapi->hw_status;
6184         unsigned int handled = 1;
6185
6186         /* In INTx mode, it is possible for the interrupt to arrive at
6187          * the CPU before the status block posted prior to the interrupt has reached host memory.
6188          * Reading the PCI State register will confirm whether the
6189          * interrupt is ours and will flush the status block.
6190          */
6191         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6192                 if (tg3_flag(tp, CHIP_RESETTING) ||
6193                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6194                         handled = 0;
6195                         goto out;
6196                 }
6197         }
6198
6199         /*
6200          * Writing any value to intr-mbox-0 clears PCI INTA# and
6201          * chip-internal interrupt pending events.
6202          * Writing non-zero to intr-mbox-0 additionally tells the
6203          * NIC to stop sending us irqs, engaging "in-intr-handler"
6204          * event coalescing.
6205          *
6206          * Flush the mailbox to de-assert the IRQ immediately to prevent
6207          * spurious interrupts.  The flush impacts performance but
6208          * excessive spurious interrupts can be worse in some cases.
6209          */
6210         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6211         if (tg3_irq_sync(tp))
6212                 goto out;
6213         sblk->status &= ~SD_STATUS_UPDATED;
6214         if (likely(tg3_has_work(tnapi))) {
6215                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6216                 napi_schedule(&tnapi->napi);
6217         } else {
6218                 /* No work, shared interrupt perhaps?  re-enable
6219                  * interrupts, and flush that PCI write
6220                  */
6221                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6222                                0x00000000);
6223         }
6224 out:
6225         return IRQ_RETVAL(handled);
6226 }
6227
6228 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6229 {
6230         struct tg3_napi *tnapi = dev_id;
6231         struct tg3 *tp = tnapi->tp;
6232         struct tg3_hw_status *sblk = tnapi->hw_status;
6233         unsigned int handled = 1;
6234
6235         /* In INTx mode, it is possible for the interrupt to arrive at
6236          * the CPU before the status block posted prior to the interrupt has reached host memory.
6237          * Reading the PCI State register will confirm whether the
6238          * interrupt is ours and will flush the status block.
6239          */
6240         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6241                 if (tg3_flag(tp, CHIP_RESETTING) ||
6242                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6243                         handled = 0;
6244                         goto out;
6245                 }
6246         }
6247
6248         /*
6249          * Writing any value to intr-mbox-0 clears PCI INTA# and
6250          * chip-internal interrupt pending events.
6251          * Writing non-zero to intr-mbox-0 additionally tells the
6252          * NIC to stop sending us irqs, engaging "in-intr-handler"
6253          * event coalescing.
6254          *
6255          * Flush the mailbox to de-assert the IRQ immediately to prevent
6256          * spurious interrupts.  The flush impacts performance but
6257          * excessive spurious interrupts can be worse in some cases.
6258          */
6259         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6260
6261         /*
6262          * In a shared interrupt configuration, sometimes other devices'
6263          * interrupts will scream.  We record the current status tag here
6264          * so that the above check can report that the screaming interrupts
6265          * are unhandled.  Eventually they will be silenced.
6266          */
6267         tnapi->last_irq_tag = sblk->status_tag;
6268
6269         if (tg3_irq_sync(tp))
6270                 goto out;
6271
6272         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6273
6274         napi_schedule(&tnapi->napi);
6275
6276 out:
6277         return IRQ_RETVAL(handled);
6278 }
6279
6280 /* ISR for interrupt test */
6281 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6282 {
6283         struct tg3_napi *tnapi = dev_id;
6284         struct tg3 *tp = tnapi->tp;
6285         struct tg3_hw_status *sblk = tnapi->hw_status;
6286
6287         if ((sblk->status & SD_STATUS_UPDATED) ||
6288             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6289                 tg3_disable_ints(tp);
6290                 return IRQ_RETVAL(1);
6291         }
6292         return IRQ_RETVAL(0);
6293 }
6294
6295 static int tg3_init_hw(struct tg3 *, int);
6296 static int tg3_halt(struct tg3 *, int, int);
6297
6298 /* Restart hardware after configuration changes, self-test, etc.
6299  * Invoked with tp->lock held; on failure the lock is dropped around dev_close() and then re-acquired.
6300  */
6301 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
6302         __releases(tp->lock)
6303         __acquires(tp->lock)
6304 {
6305         int err;
6306
6307         err = tg3_init_hw(tp, reset_phy);
6308         if (err) {
6309                 netdev_err(tp->dev,
6310                            "Failed to re-initialize device, aborting\n");
6311                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6312                 tg3_full_unlock(tp);
6313                 del_timer_sync(&tp->timer);
6314                 tp->irq_sync = 0;
6315                 tg3_napi_enable(tp);
6316                 dev_close(tp->dev);
6317                 tg3_full_lock(tp, 0);
6318         }
6319         return err;
6320 }
6321
6322 #ifdef CONFIG_NET_POLL_CONTROLLER
6323 static void tg3_poll_controller(struct net_device *dev)
6324 {
6325         int i;
6326         struct tg3 *tp = netdev_priv(dev);
6327
6328         for (i = 0; i < tp->irq_cnt; i++)
6329                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6330 }
6331 #endif
6332
6333 static void tg3_reset_task(struct work_struct *work)
6334 {
6335         struct tg3 *tp = container_of(work, struct tg3, reset_task);
6336         int err;
6337
6338         tg3_full_lock(tp, 0);
6339
6340         if (!netif_running(tp->dev)) {
6341                 tg3_flag_clear(tp, RESET_TASK_PENDING);
6342                 tg3_full_unlock(tp);
6343                 return;
6344         }
6345
6346         tg3_full_unlock(tp);
6347
6348         tg3_phy_stop(tp);
6349
6350         tg3_netif_stop(tp);
6351
6352         tg3_full_lock(tp, 1);
6353
6354         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
6355                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
6356                 tp->write32_rx_mbox = tg3_write_flush_reg32;
6357                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
6358                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6359         }
6360
6361         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
6362         err = tg3_init_hw(tp, 1);
6363         if (err)
6364                 goto out;
6365
6366         tg3_netif_start(tp);
6367
6368 out:
6369         tg3_full_unlock(tp);
6370
6371         if (!err)
6372                 tg3_phy_start(tp);
6373
6374         tg3_flag_clear(tp, RESET_TASK_PENDING);
6375 }
6376
6377 static void tg3_tx_timeout(struct net_device *dev)
6378 {
6379         struct tg3 *tp = netdev_priv(dev);
6380
6381         if (netif_msg_tx_err(tp)) {
6382                 netdev_err(dev, "transmit timed out, resetting\n");
6383                 tg3_dump_state(tp);
6384         }
6385
6386         tg3_reset_task_schedule(tp);
6387 }
6388
6389 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
6390 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6391 {
6392         u32 base = (u32) mapping & 0xffffffff;
6393
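        /* The second clause catches a 32-bit wrap of base + len + 8.  The
         * first looks like a fast filter: 0xffffdcc0 is 4G - 9024, seemingly
         * the largest len + 8 the driver ever maps (jumbo frame plus slack),
         * so any base at or below it cannot wrap.  (Inferred from the
         * constant, not from documentation.)
         */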
6394         return (base > 0xffffdcc0) && (base + len + 8 < base);
6395 }
6396
6397 /* Test for DMA addresses > 40-bit */
6398 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6399                                           int len)
6400 {
6401 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6402         if (tg3_flag(tp, 40BIT_DMA_BUG))
6403                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6404         return 0;
6405 #else
6406         return 0;
6407 #endif
6408 }
6409
6410 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6411                                  dma_addr_t mapping, u32 len, u32 flags,
6412                                  u32 mss, u32 vlan)
6413 {
6414         txbd->addr_hi = ((u64) mapping >> 32);
6415         txbd->addr_lo = ((u64) mapping & 0xffffffff);
6416         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6417         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6418 }
6419
6420 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6421                             dma_addr_t map, u32 len, u32 flags,
6422                             u32 mss, u32 vlan)
6423 {
6424         struct tg3 *tp = tnapi->tp;
6425         bool hwbug = false;
6426
6427         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6428                 hwbug = true;
6429
6430         if (tg3_4g_overflow_test(map, len))
6431                 hwbug = true;
6432
6433         if (tg3_40bit_overflow_test(tp, map, len))
6434                 hwbug = true;
6435
6436         if (tp->dma_limit) {
6437                 u32 prvidx = *entry;
6438                 u32 tmp_flag = flags & ~TXD_FLAG_END;
6439                 while (len > tp->dma_limit && *budget) {
6440                         u32 frag_len = tp->dma_limit;
6441                         len -= tp->dma_limit;
6442
6443                         /* Avoid the 8-byte DMA problem: split this chunk in half so the final piece stays larger than 8 bytes */
6444                         if (len <= 8) {
6445                                 len += tp->dma_limit / 2;
6446                                 frag_len = tp->dma_limit / 2;
6447                         }
6448
6449                         tnapi->tx_buffers[*entry].fragmented = true;
6450
6451                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6452                                       frag_len, tmp_flag, mss, vlan);
6453                         *budget -= 1;
6454                         prvidx = *entry;
6455                         *entry = NEXT_TX(*entry);
6456
6457                         map += frag_len;
6458                 }
6459
6460                 if (len) {
6461                         if (*budget) {
6462                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6463                                               len, flags, mss, vlan);
6464                                 *budget -= 1;
6465                                 *entry = NEXT_TX(*entry);
6466                         } else {
6467                                 hwbug = true;
6468                                 tnapi->tx_buffers[prvidx].fragmented = false;
6469                         }
6470                 }
6471         } else {
6472                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6473                               len, flags, mss, vlan);
6474                 *entry = NEXT_TX(*entry);
6475         }
6476
6477         return hwbug;
6478 }
6479
6480 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6481 {
6482         int i;
6483         struct sk_buff *skb;
6484         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6485
6486         skb = txb->skb;
6487         txb->skb = NULL;
6488
6489         pci_unmap_single(tnapi->tp->pdev,
6490                          dma_unmap_addr(txb, mapping),
6491                          skb_headlen(skb),
6492                          PCI_DMA_TODEVICE);
6493
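        /* tg3_tx_frag_set() may have split this buffer across several
         * descriptors; step over any entries it marked fragmented before
         * unmapping the next fragment.
         */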
6494         while (txb->fragmented) {
6495                 txb->fragmented = false;
6496                 entry = NEXT_TX(entry);
6497                 txb = &tnapi->tx_buffers[entry];
6498         }
6499
6500         for (i = 0; i <= last; i++) {
6501                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6502
6503                 entry = NEXT_TX(entry);
6504                 txb = &tnapi->tx_buffers[entry];
6505
6506                 pci_unmap_page(tnapi->tp->pdev,
6507                                dma_unmap_addr(txb, mapping),
6508                                skb_frag_size(frag), PCI_DMA_TODEVICE);
6509
6510                 while (txb->fragmented) {
6511                         txb->fragmented = false;
6512                         entry = NEXT_TX(entry);
6513                         txb = &tnapi->tx_buffers[entry];
6514                 }
6515         }
6516 }
6517
6518 /* Work around the 4GB and 40-bit hardware DMA bugs. */
6519 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6520                                        struct sk_buff **pskb,
6521                                        u32 *entry, u32 *budget,
6522                                        u32 base_flags, u32 mss, u32 vlan)
6523 {
6524         struct tg3 *tp = tnapi->tp;
6525         struct sk_buff *new_skb, *skb = *pskb;
6526         dma_addr_t new_addr = 0;
6527         int ret = 0;
6528
6529         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6530                 new_skb = skb_copy(skb, GFP_ATOMIC);
6531         else {
6532                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6533
6534                 new_skb = skb_copy_expand(skb,
6535                                           skb_headroom(skb) + more_headroom,
6536                                           skb_tailroom(skb), GFP_ATOMIC);
6537         }
6538
6539         if (!new_skb) {
6540                 ret = -1;
6541         } else {
6542                 /* New SKB is guaranteed to be linear. */
6543                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6544                                           PCI_DMA_TODEVICE);
6545                 /* Make sure the mapping succeeded */
6546                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6547                         dev_kfree_skb(new_skb);
6548                         ret = -1;
6549                 } else {
6550                         u32 save_entry = *entry;
6551
6552                         base_flags |= TXD_FLAG_END;
6553
6554                         tnapi->tx_buffers[*entry].skb = new_skb;
6555                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6556                                            mapping, new_addr);
6557
6558                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6559                                             new_skb->len, base_flags,
6560                                             mss, vlan)) {
6561                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
6562                                 dev_kfree_skb(new_skb);
6563                                 ret = -1;
6564                         }
6565                 }
6566         }
6567
6568         dev_kfree_skb(skb);
6569         *pskb = new_skb;
6570         return ret;
6571 }
6572
6573 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6574
6575 /* Use GSO to work around a rare TSO bug that may be triggered when the
6576  * TSO header is greater than 80 bytes.
6577  */
6578 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6579 {
6580         struct sk_buff *segs, *nskb;
6581         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6582
6583         /* Estimate the number of fragments in the worst case */
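        /* (gso_segs * 3 presumably budgets up to three descriptors per
         * segment: the linear header area plus a couple of paged frags.)
         */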
6584         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6585                 netif_stop_queue(tp->dev);
6586
6587                 /* netif_tx_stop_queue() must be done before checking
6588                  * tx index in tg3_tx_avail() below, because in
6589                  * tg3_tx(), we update tx index before checking for
6590                  * netif_tx_queue_stopped().
6591                  */
6592                 smp_mb();
6593                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6594                         return NETDEV_TX_BUSY;
6595
6596                 netif_wake_queue(tp->dev);
6597         }
6598
6599         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6600         if (IS_ERR(segs))
6601                 goto tg3_tso_bug_end;
6602
6603         do {
6604                 nskb = segs;
6605                 segs = segs->next;
6606                 nskb->next = NULL;
6607                 tg3_start_xmit(nskb, tp->dev);
6608         } while (segs);
6609
6610 tg3_tso_bug_end:
6611         dev_kfree_skb(skb);
6612
6613         return NETDEV_TX_OK;
6614 }
6615
6616 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6617  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6618  */
6619 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6620 {
6621         struct tg3 *tp = netdev_priv(dev);
6622         u32 len, entry, base_flags, mss, vlan = 0;
6623         u32 budget;
6624         int i = -1, would_hit_hwbug;
6625         dma_addr_t mapping;
6626         struct tg3_napi *tnapi;
6627         struct netdev_queue *txq;
6628         unsigned int last;
6629
6630         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
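        /* With TSS the per-queue tx rings evidently live on vectors 1 and
         * up, with vector 0 left for rx/link work, hence the bump past
         * napi[0] below.
         */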
6631         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6632         if (tg3_flag(tp, ENABLE_TSS))
6633                 tnapi++;
6634
6635         budget = tg3_tx_avail(tnapi);
6636
6637         /* We are running in BH disabled context with netif_tx_lock
6638          * and TX reclaim runs via tp->napi.poll inside of a software
6639          * interrupt.  Furthermore, IRQ processing runs lockless so we have
6640          * no IRQ context deadlocks to worry about either.  Rejoice!
6641          */
6642         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6643                 if (!netif_tx_queue_stopped(txq)) {
6644                         netif_tx_stop_queue(txq);
6645
6646                         /* This is a hard error, log it. */
6647                         netdev_err(dev,
6648                                    "BUG! Tx Ring full when queue awake!\n");
6649                 }
6650                 return NETDEV_TX_BUSY;
6651         }
6652
6653         entry = tnapi->tx_prod;
6654         base_flags = 0;
6655         if (skb->ip_summed == CHECKSUM_PARTIAL)
6656                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6657
6658         mss = skb_shinfo(skb)->gso_size;
6659         if (mss) {
6660                 struct iphdr *iph;
6661                 u32 tcp_opt_len, hdr_len;
6662
6663                 if (skb_header_cloned(skb) &&
6664                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6665                         goto drop;
6666
6667                 iph = ip_hdr(skb);
6668                 tcp_opt_len = tcp_optlen(skb);
6669
6670                 if (skb_is_gso_v6(skb)) {
6671                         hdr_len = skb_headlen(skb) - ETH_HLEN;
6672                 } else {
6673                         u32 ip_tcp_len;
6674
6675                         ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
6676                         hdr_len = ip_tcp_len + tcp_opt_len;
6677
6678                         iph->check = 0;
6679                         iph->tot_len = htons(mss + hdr_len);
6680                 }
6681
6682                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6683                     tg3_flag(tp, TSO_BUG))
6684                         return tg3_tso_bug(tp, skb);
6685
6686                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6687                                TXD_FLAG_CPU_POST_DMA);
6688
6689                 if (tg3_flag(tp, HW_TSO_1) ||
6690                     tg3_flag(tp, HW_TSO_2) ||
6691                     tg3_flag(tp, HW_TSO_3)) {
6692                         tcp_hdr(skb)->check = 0;
6693                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6694                 } else
6695                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6696                                                                  iph->daddr, 0,
6697                                                                  IPPROTO_TCP,
6698                                                                  0);
6699
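                /* Each hardware TSO generation encodes the header length
                 * into the mss word / base_flags differently; the shifts
                 * below follow the per-chip layouts.
                 */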
6700                 if (tg3_flag(tp, HW_TSO_3)) {
6701                         mss |= (hdr_len & 0xc) << 12;
6702                         if (hdr_len & 0x10)
6703                                 base_flags |= 0x00000010;
6704                         base_flags |= (hdr_len & 0x3e0) << 5;
6705                 } else if (tg3_flag(tp, HW_TSO_2))
6706                         mss |= hdr_len << 9;
6707                 else if (tg3_flag(tp, HW_TSO_1) ||
6708                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6709                         if (tcp_opt_len || iph->ihl > 5) {
6710                                 int tsflags;
6711
6712                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6713                                 mss |= (tsflags << 11);
6714                         }
6715                 } else {
6716                         if (tcp_opt_len || iph->ihl > 5) {
6717                                 int tsflags;
6718
6719                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6720                                 base_flags |= tsflags << 12;
6721                         }
6722                 }
6723         }
6724
6725         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6726             !mss && skb->len > VLAN_ETH_FRAME_LEN)
6727                 base_flags |= TXD_FLAG_JMB_PKT;
6728
6729         if (vlan_tx_tag_present(skb)) {
6730                 base_flags |= TXD_FLAG_VLAN;
6731                 vlan = vlan_tx_tag_get(skb);
6732         }
6733
6734         len = skb_headlen(skb);
6735
6736         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6737         if (pci_dma_mapping_error(tp->pdev, mapping))
6738                 goto drop;
6739
6740
6741         tnapi->tx_buffers[entry].skb = skb;
6742         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6743
6744         would_hit_hwbug = 0;
6745
6746         if (tg3_flag(tp, 5701_DMA_BUG))
6747                 would_hit_hwbug = 1;
6748
6749         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
6750                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6751                             mss, vlan)) {
6752                 would_hit_hwbug = 1;
6753         /* Now loop through additional data fragments, and queue them. */
6754         } else if (skb_shinfo(skb)->nr_frags > 0) {
6755                 u32 tmp_mss = mss;
6756
6757                 if (!tg3_flag(tp, HW_TSO_1) &&
6758                     !tg3_flag(tp, HW_TSO_2) &&
6759                     !tg3_flag(tp, HW_TSO_3))
6760                         tmp_mss = 0;
6761
6762                 last = skb_shinfo(skb)->nr_frags - 1;
6763                 for (i = 0; i <= last; i++) {
6764                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6765
6766                         len = skb_frag_size(frag);
6767                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
6768                                                    len, DMA_TO_DEVICE);
6769
6770                         tnapi->tx_buffers[entry].skb = NULL;
6771                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6772                                            mapping);
6773                         if (dma_mapping_error(&tp->pdev->dev, mapping))
6774                                 goto dma_error;
6775
6776                         if (!budget ||
6777                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
6778                                             len, base_flags |
6779                                             ((i == last) ? TXD_FLAG_END : 0),
6780                                             tmp_mss, vlan)) {
6781                                 would_hit_hwbug = 1;
6782                                 break;
6783                         }
6784                 }
6785         }
6786
6787         if (would_hit_hwbug) {
6788                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6789
6790                 /* If the workaround fails due to memory/mapping
6791                  * failure, silently drop this packet.
6792                  */
6793                 entry = tnapi->tx_prod;
6794                 budget = tg3_tx_avail(tnapi);
6795                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
6796                                                 base_flags, mss, vlan))
6797                         goto drop_nofree;
6798         }
6799
6800         skb_tx_timestamp(skb);
6801         netdev_sent_queue(tp->dev, skb->len);
6802
6803         /* Packets are ready, update Tx producer idx local and on card. */
6804         tw32_tx_mbox(tnapi->prodmbox, entry);
6805
6806         tnapi->tx_prod = entry;
6807         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6808                 netif_tx_stop_queue(txq);
6809
6810                 /* netif_tx_stop_queue() must be done before checking
6811                  * tx index in tg3_tx_avail() below, because in
6812                  * tg3_tx(), we update tx index before checking for
6813                  * netif_tx_queue_stopped().
6814                  */
6815                 smp_mb();
6816                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6817                         netif_tx_wake_queue(txq);
6818         }
6819
6820         mmiowb();
6821         return NETDEV_TX_OK;
6822
6823 dma_error:
6824         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
6825         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6826 drop:
6827         dev_kfree_skb(skb);
6828 drop_nofree:
6829         tp->tx_dropped++;
6830         return NETDEV_TX_OK;
6831 }
6832
6833 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
6834 {
6835         if (enable) {
6836                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
6837                                   MAC_MODE_PORT_MODE_MASK);
6838
6839                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6840
6841                 if (!tg3_flag(tp, 5705_PLUS))
6842                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6843
6844                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
6845                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
6846                 else
6847                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
6848         } else {
6849                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6850
6851                 if (tg3_flag(tp, 5705_PLUS) ||
6852                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
6853                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
6854                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
6855         }
6856
6857         tw32(MAC_MODE, tp->mac_mode);
6858         udelay(40);
6859 }
6860
6861 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
6862 {
6863         u32 val, bmcr, mac_mode, ptest = 0;
6864
6865         tg3_phy_toggle_apd(tp, false);
6866         tg3_phy_toggle_automdix(tp, 0);
6867
6868         if (extlpbk && tg3_phy_set_extloopbk(tp))
6869                 return -EIO;
6870
6871         bmcr = BMCR_FULLDPLX;
6872         switch (speed) {
6873         case SPEED_10:
6874                 break;
6875         case SPEED_100:
6876                 bmcr |= BMCR_SPEED100;
6877                 break;
6878         case SPEED_1000:
6879         default:
6880                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
6881                         speed = SPEED_100;
6882                         bmcr |= BMCR_SPEED100;
6883                 } else {
6884                         speed = SPEED_1000;
6885                         bmcr |= BMCR_SPEED1000;
6886                 }
6887         }
6888
6889         if (extlpbk) {
6890                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
6891                         tg3_readphy(tp, MII_CTRL1000, &val);
6892                         val |= CTL1000_AS_MASTER |
6893                                CTL1000_ENABLE_MASTER;
6894                         tg3_writephy(tp, MII_CTRL1000, val);
6895                 } else {
6896                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
6897                                 MII_TG3_FET_PTEST_TRIM_2;
6898                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
6899                 }
6900         } else
6901                 bmcr |= BMCR_LOOPBACK;
6902
6903         tg3_writephy(tp, MII_BMCR, bmcr);
6904
6905         /* The write needs to be flushed for the FETs */
6906         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
6907                 tg3_readphy(tp, MII_BMCR, &bmcr);
6908
6909         udelay(40);
6910
6911         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
6912             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
6913                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
6914                              MII_TG3_FET_PTEST_FRC_TX_LINK |
6915                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
6916
6917                 /* The write needs to be flushed for the AC131 */
6918                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
6919         }
6920
6921         /* Reset to prevent losing 1st rx packet intermittently */
6922         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
6923             tg3_flag(tp, 5780_CLASS)) {
6924                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6925                 udelay(10);
6926                 tw32_f(MAC_RX_MODE, tp->rx_mode);
6927         }
6928
6929         mac_mode = tp->mac_mode &
6930                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
6931         if (speed == SPEED_1000)
6932                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
6933         else
6934                 mac_mode |= MAC_MODE_PORT_MODE_MII;
6935
6936         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
6937                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
6938
6939                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
6940                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
6941                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
6942                         mac_mode |= MAC_MODE_LINK_POLARITY;
6943
6944                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
6945                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
6946         }
6947
6948         tw32(MAC_MODE, mac_mode);
6949         udelay(40);
6950
6951         return 0;
6952 }
6953
6954 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
6955 {
6956         struct tg3 *tp = netdev_priv(dev);
6957
6958         if (features & NETIF_F_LOOPBACK) {
6959                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6960                         return;
6961
6962                 spin_lock_bh(&tp->lock);
6963                 tg3_mac_loopback(tp, true);
6964                 netif_carrier_on(tp->dev);
6965                 spin_unlock_bh(&tp->lock);
6966                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6967         } else {
6968                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6969                         return;
6970
6971                 spin_lock_bh(&tp->lock);
6972                 tg3_mac_loopback(tp, false);
6973                 /* Force link status check */
6974                 tg3_setup_phy(tp, 1);
6975                 spin_unlock_bh(&tp->lock);
6976                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6977         }
6978 }
6979
6980 static netdev_features_t tg3_fix_features(struct net_device *dev,
6981         netdev_features_t features)
6982 {
6983         struct tg3 *tp = netdev_priv(dev);
6984
6985         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6986                 features &= ~NETIF_F_ALL_TSO;
6987
6988         return features;
6989 }
6990
6991 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
6992 {
6993         netdev_features_t changed = dev->features ^ features;
6994
6995         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6996                 tg3_set_loopback(dev, features);
6997
6998         return 0;
6999 }
7000
7001 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
7002                                int new_mtu)
7003 {
7004         dev->mtu = new_mtu;
7005
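        /* 5780-class parts cannot do TSO on jumbo frames (tg3_fix_features()
         * above masks it out), so the advertised features are recomputed
         * whenever the MTU crosses ETH_DATA_LEN.
         */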
7006         if (new_mtu > ETH_DATA_LEN) {
7007                 if (tg3_flag(tp, 5780_CLASS)) {
7008                         netdev_update_features(dev);
7009                         tg3_flag_clear(tp, TSO_CAPABLE);
7010                 } else {
7011                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
7012                 }
7013         } else {
7014                 if (tg3_flag(tp, 5780_CLASS)) {
7015                         tg3_flag_set(tp, TSO_CAPABLE);
7016                         netdev_update_features(dev);
7017                 }
7018                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
7019         }
7020 }
7021
7022 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
7023 {
7024         struct tg3 *tp = netdev_priv(dev);
7025         int err;
7026
7027         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
7028                 return -EINVAL;
7029
7030         if (!netif_running(dev)) {
7031                 /* We'll just catch it later when the
7032                  * device is brought up.
7033                  */
7034                 tg3_set_mtu(dev, tp, new_mtu);
7035                 return 0;
7036         }
7037
7038         tg3_phy_stop(tp);
7039
7040         tg3_netif_stop(tp);
7041
7042         tg3_full_lock(tp, 1);
7043
7044         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7045
7046         tg3_set_mtu(dev, tp, new_mtu);
7047
7048         err = tg3_restart_hw(tp, 0);
7049
7050         if (!err)
7051                 tg3_netif_start(tp);
7052
7053         tg3_full_unlock(tp);
7054
7055         if (!err)
7056                 tg3_phy_start(tp);
7057
7058         return err;
7059 }
7060
7061 static void tg3_rx_prodring_free(struct tg3 *tp,
7062                                  struct tg3_rx_prodring_set *tpr)
7063 {
7064         int i;
7065
7066         if (tpr != &tp->napi[0].prodring) {
7067                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7068                      i = (i + 1) & tp->rx_std_ring_mask)
7069                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7070                                         tp->rx_pkt_map_sz);
7071
7072                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7073                         for (i = tpr->rx_jmb_cons_idx;
7074                              i != tpr->rx_jmb_prod_idx;
7075                              i = (i + 1) & tp->rx_jmb_ring_mask) {
7076                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7077                                                 TG3_RX_JMB_MAP_SZ);
7078                         }
7079                 }
7080
7081                 return;
7082         }
7083
7084         for (i = 0; i <= tp->rx_std_ring_mask; i++)
7085                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7086                                 tp->rx_pkt_map_sz);
7087
7088         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7089                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7090                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7091                                         TG3_RX_JMB_MAP_SZ);
7092         }
7093 }
7094
7095 /* Initialize rx rings for packet processing.
7096  *
7097  * The chip has been shut down and the driver detached from
7098  * the networking stack, so no interrupts or new tx packets will
7099  * end up in the driver.  tp->{tx,}lock are held and thus
7100  * we may not sleep.
7101  */
7102 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7103                                  struct tg3_rx_prodring_set *tpr)
7104 {
7105         u32 i, rx_pkt_dma_sz;
7106
7107         tpr->rx_std_cons_idx = 0;
7108         tpr->rx_std_prod_idx = 0;
7109         tpr->rx_jmb_cons_idx = 0;
7110         tpr->rx_jmb_prod_idx = 0;
7111
7112         if (tpr != &tp->napi[0].prodring) {
7113                 memset(&tpr->rx_std_buffers[0], 0,
7114                        TG3_RX_STD_BUFF_RING_SIZE(tp));
7115                 if (tpr->rx_jmb_buffers)
7116                         memset(&tpr->rx_jmb_buffers[0], 0,
7117                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
7118                 goto done;
7119         }
7120
7121         /* Zero out all descriptors. */
7122         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7123
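             /* 5780-class chips have no separate jumbo ring; with a jumbo
              * MTU they receive into enlarged standard-ring buffers instead,
              * hence the larger DMA size below.
              */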
7124         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7125         if (tg3_flag(tp, 5780_CLASS) &&
7126             tp->dev->mtu > ETH_DATA_LEN)
7127                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7128         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7129
7130         /* Initialize invariants of the rings; we only set this
7131          * stuff once.  This works because the card does not
7132          * write into the rx buffer posting rings.
7133          */
7134         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7135                 struct tg3_rx_buffer_desc *rxd;
7136
7137                 rxd = &tpr->rx_std[i];
7138                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7139                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7140                 rxd->opaque = (RXD_OPAQUE_RING_STD |
7141                                (i << RXD_OPAQUE_INDEX_SHIFT));
7142         }
7143
7144         /* Now allocate fresh rx data buffers for the standard ring. */
7145         for (i = 0; i < tp->rx_pending; i++) {
7146                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
7147                         netdev_warn(tp->dev,
7148                                     "Using a smaller RX standard ring. Only "
7149                                     "%d out of %d buffers were allocated "
7150                                     "successfully\n", i, tp->rx_pending);
7151                         if (i == 0)
7152                                 goto initfail;
7153                         tp->rx_pending = i;
7154                         break;
7155                 }
7156         }
7157
7158         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7159                 goto done;
7160
7161         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7162
7163         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7164                 goto done;
7165
7166         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7167                 struct tg3_rx_buffer_desc *rxd;
7168
7169                 rxd = &tpr->rx_jmb[i].std;
7170                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7171                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7172                                   RXD_FLAG_JUMBO;
7173                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7174                        (i << RXD_OPAQUE_INDEX_SHIFT));
7175         }
7176
7177         for (i = 0; i < tp->rx_jumbo_pending; i++) {
7178                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
7179                         netdev_warn(tp->dev,
7180                                     "Using a smaller RX jumbo ring. Only %d "
7181                                     "out of %d buffers were allocated "
7182                                     "successfully\n", i, tp->rx_jumbo_pending);
7183                         if (i == 0)
7184                                 goto initfail;
7185                         tp->rx_jumbo_pending = i;
7186                         break;
7187                 }
7188         }
7189
7190 done:
7191         return 0;
7192
7193 initfail:
7194         tg3_rx_prodring_free(tp, tpr);
7195         return -ENOMEM;
7196 }
7197
7198 static void tg3_rx_prodring_fini(struct tg3 *tp,
7199                                  struct tg3_rx_prodring_set *tpr)
7200 {
7201         kfree(tpr->rx_std_buffers);
7202         tpr->rx_std_buffers = NULL;
7203         kfree(tpr->rx_jmb_buffers);
7204         tpr->rx_jmb_buffers = NULL;
7205         if (tpr->rx_std) {
7206                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7207                                   tpr->rx_std, tpr->rx_std_mapping);
7208                 tpr->rx_std = NULL;
7209         }
7210         if (tpr->rx_jmb) {
7211                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7212                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
7213                 tpr->rx_jmb = NULL;
7214         }
7215 }
7216
7217 static int tg3_rx_prodring_init(struct tg3 *tp,
7218                                 struct tg3_rx_prodring_set *tpr)
7219 {
7220         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7221                                       GFP_KERNEL);
7222         if (!tpr->rx_std_buffers)
7223                 return -ENOMEM;
7224
7225         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7226                                          TG3_RX_STD_RING_BYTES(tp),
7227                                          &tpr->rx_std_mapping,
7228                                          GFP_KERNEL);
7229         if (!tpr->rx_std)
7230                 goto err_out;
7231
7232         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7233                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7234                                               GFP_KERNEL);
7235                 if (!tpr->rx_jmb_buffers)
7236                         goto err_out;
7237
7238                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7239                                                  TG3_RX_JMB_RING_BYTES(tp),
7240                                                  &tpr->rx_jmb_mapping,
7241                                                  GFP_KERNEL);
7242                 if (!tpr->rx_jmb)
7243                         goto err_out;
7244         }
7245
7246         return 0;
7247
7248 err_out:
7249         tg3_rx_prodring_fini(tp, tpr);
7250         return -ENOMEM;
7251 }
7252
7253 /* Free up pending packets in all rx/tx rings.
7254  *
7255  * The chip has been shut down and the driver detached from
7256  * the networking stack, so no interrupts or new tx packets will
7257  * end up in the driver.  tp->{tx,}lock is not held and we are not
7258  * in an interrupt context and thus may sleep.
7259  */
7260 static void tg3_free_rings(struct tg3 *tp)
7261 {
7262         int i, j;
7263
7264         for (j = 0; j < tp->irq_cnt; j++) {
7265                 struct tg3_napi *tnapi = &tp->napi[j];
7266
7267                 tg3_rx_prodring_free(tp, &tnapi->prodring);
7268
7269                 if (!tnapi->tx_buffers)
7270                         continue;
7271
7272                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7273                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7274
7275                         if (!skb)
7276                                 continue;
7277
7278                         tg3_tx_skb_unmap(tnapi, i,
7279                                          skb_shinfo(skb)->nr_frags - 1);
7280
7281                         dev_kfree_skb_any(skb);
7282                 }
7283         }
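             /* All pending tx packets were dropped above, so reset the BQL
              * (byte queue limit) accounting for the device as well.
              */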
7284         netdev_reset_queue(tp->dev);
7285 }
7286
7287 /* Initialize tx/rx rings for packet processing.
7288  *
7289  * The chip has been shut down and the driver detached from
7290  * the networking stack, so no interrupts or new tx packets will
7291  * end up in the driver.  tp->{tx,}lock are held and thus
7292  * we may not sleep.
7293  */
7294 static int tg3_init_rings(struct tg3 *tp)
7295 {
7296         int i;
7297
7298         /* Free up all the SKBs and rx data buffers. */
7299         tg3_free_rings(tp);
7300
7301         for (i = 0; i < tp->irq_cnt; i++) {
7302                 struct tg3_napi *tnapi = &tp->napi[i];
7303
7304                 tnapi->last_tag = 0;
7305                 tnapi->last_irq_tag = 0;
7306                 tnapi->hw_status->status = 0;
7307                 tnapi->hw_status->status_tag = 0;
7308                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7309
7310                 tnapi->tx_prod = 0;
7311                 tnapi->tx_cons = 0;
7312                 if (tnapi->tx_ring)
7313                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7314
7315                 tnapi->rx_rcb_ptr = 0;
7316                 if (tnapi->rx_rcb)
7317                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7318
7319                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7320                         tg3_free_rings(tp);
7321                         return -ENOMEM;
7322                 }
7323         }
7324
7325         return 0;
7326 }
7327
7328 /*
7329  * Must only be invoked with interrupt sources disabled and
7330  * the hardware shut down.
7331  */
7332 static void tg3_free_consistent(struct tg3 *tp)
7333 {
7334         int i;
7335
7336         for (i = 0; i < tp->irq_cnt; i++) {
7337                 struct tg3_napi *tnapi = &tp->napi[i];
7338
7339                 if (tnapi->tx_ring) {
7340                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7341                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
7342                         tnapi->tx_ring = NULL;
7343                 }
7344
7345                 kfree(tnapi->tx_buffers);
7346                 tnapi->tx_buffers = NULL;
7347
7348                 if (tnapi->rx_rcb) {
7349                         dma_free_coherent(&tp->pdev->dev,
7350                                           TG3_RX_RCB_RING_BYTES(tp),
7351                                           tnapi->rx_rcb,
7352                                           tnapi->rx_rcb_mapping);
7353                         tnapi->rx_rcb = NULL;
7354                 }
7355
7356                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7357
7358                 if (tnapi->hw_status) {
7359                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7360                                           tnapi->hw_status,
7361                                           tnapi->status_mapping);
7362                         tnapi->hw_status = NULL;
7363                 }
7364         }
7365
7366         if (tp->hw_stats) {
7367                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7368                                   tp->hw_stats, tp->stats_mapping);
7369                 tp->hw_stats = NULL;
7370         }
7371 }
7372
7373 /*
7374  * Must only be invoked with interrupt sources disabled and
7375  * the hardware shut down.  Can sleep.
7376  */
7377 static int tg3_alloc_consistent(struct tg3 *tp)
7378 {
7379         int i;
7380
7381         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7382                                           sizeof(struct tg3_hw_stats),
7383                                           &tp->stats_mapping,
7384                                           GFP_KERNEL);
7385         if (!tp->hw_stats)
7386                 goto err_out;
7387
7388         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7389
7390         for (i = 0; i < tp->irq_cnt; i++) {
7391                 struct tg3_napi *tnapi = &tp->napi[i];
7392                 struct tg3_hw_status *sblk;
7393
7394                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7395                                                       TG3_HW_STATUS_SIZE,
7396                                                       &tnapi->status_mapping,
7397                                                       GFP_KERNEL);
7398                 if (!tnapi->hw_status)
7399                         goto err_out;
7400
7401                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7402                 sblk = tnapi->hw_status;
7403
7404                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7405                         goto err_out;
7406
7407                 /* If multivector TSS is enabled, vector 0 does not handle
7408                  * tx interrupts.  Don't allocate any resources for it.
7409                  */
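                     /* That is: vector 0 gets tx resources only when TSS is
                      * off, and vectors 1..N only when TSS is on.
                      */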
7410                 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
7411                     (i && tg3_flag(tp, ENABLE_TSS))) {
7412                         tnapi->tx_buffers = kzalloc(
7413                                                sizeof(struct tg3_tx_ring_info) *
7414                                                TG3_TX_RING_SIZE, GFP_KERNEL);
7415                         if (!tnapi->tx_buffers)
7416                                 goto err_out;
7417
7418                         tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7419                                                             TG3_TX_RING_BYTES,
7420                                                         &tnapi->tx_desc_mapping,
7421                                                             GFP_KERNEL);
7422                         if (!tnapi->tx_ring)
7423                                 goto err_out;
7424                 }
7425
7426                 /*
7427                  * When RSS is enabled, the status block format changes
7428                  * slightly.  The "rx_jumbo_consumer", "reserved",
7429                  * and "rx_mini_consumer" members get mapped to the
7430                  * other three rx return ring producer indexes.
7431                  */
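                     /* Vector 1 (and vector 0, which has no rx return ring
                      * when RSS is on) takes the default case, i.e. the
                      * standard rx producer index.
                      */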
7432                 switch (i) {
7433                 default:
7434                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7435                         break;
7436                 case 2:
7437                         tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7438                         break;
7439                 case 3:
7440                         tnapi->rx_rcb_prod_idx = &sblk->reserved;
7441                         break;
7442                 case 4:
7443                         tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7444                         break;
7445                 }
7446
7447                 /*
7448                  * If multivector RSS is enabled, vector 0 does not handle
7449                  * rx or tx interrupts.  Don't allocate any resources for it.
7450                  */
7451                 if (!i && tg3_flag(tp, ENABLE_RSS))
7452                         continue;
7453
7454                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7455                                                    TG3_RX_RCB_RING_BYTES(tp),
7456                                                    &tnapi->rx_rcb_mapping,
7457                                                    GFP_KERNEL);
7458                 if (!tnapi->rx_rcb)
7459                         goto err_out;
7460
7461                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7462         }
7463
7464         return 0;
7465
7466 err_out:
7467         tg3_free_consistent(tp);
7468         return -ENOMEM;
7469 }
7470
7471 #define MAX_WAIT_CNT 1000
7472
7473 /* To stop a block, clear the enable bit and poll till it
7474  * clears.  tp->lock is held.
7475  */
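     /* With MAX_WAIT_CNT polls of 100 usec each, the loop below waits
      * roughly 100 msec in the worst case before giving up.
      */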
7476 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7477 {
7478         unsigned int i;
7479         u32 val;
7480
7481         if (tg3_flag(tp, 5705_PLUS)) {
7482                 switch (ofs) {
7483                 case RCVLSC_MODE:
7484                 case DMAC_MODE:
7485                 case MBFREE_MODE:
7486                 case BUFMGR_MODE:
7487                 case MEMARB_MODE:
7488                         /* We can't enable/disable these bits of the
7489                          * 5705/5750, so just report success.
7490                          */
7491                         return 0;
7492
7493                 default:
7494                         break;
7495                 }
7496         }
7497
7498         val = tr32(ofs);
7499         val &= ~enable_bit;
7500         tw32_f(ofs, val);
7501
7502         for (i = 0; i < MAX_WAIT_CNT; i++) {
7503                 udelay(100);
7504                 val = tr32(ofs);
7505                 if ((val & enable_bit) == 0)
7506                         break;
7507         }
7508
7509         if (i == MAX_WAIT_CNT && !silent) {
7510                 dev_err(&tp->pdev->dev,
7511                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7512                         ofs, enable_bit);
7513                 return -ENODEV;
7514         }
7515
7516         return 0;
7517 }
7518
7519 /* tp->lock is held. */
7520 static int tg3_abort_hw(struct tg3 *tp, int silent)
7521 {
7522         int i, err;
7523
7524         tg3_disable_ints(tp);
7525
7526         tp->rx_mode &= ~RX_MODE_ENABLE;
7527         tw32_f(MAC_RX_MODE, tp->rx_mode);
7528         udelay(10);
7529
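             /* tg3_stop_block() returns either 0 or -ENODEV, so OR-ing the
              * results below still yields either 0 or -ENODEV.
              */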
7530         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7531         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7532         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7533         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7534         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7535         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7536
7537         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7538         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7539         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7540         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7541         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7542         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7543         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7544
7545         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7546         tw32_f(MAC_MODE, tp->mac_mode);
7547         udelay(40);
7548
7549         tp->tx_mode &= ~TX_MODE_ENABLE;
7550         tw32_f(MAC_TX_MODE, tp->tx_mode);
7551
7552         for (i = 0; i < MAX_WAIT_CNT; i++) {
7553                 udelay(100);
7554                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7555                         break;
7556         }
7557         if (i >= MAX_WAIT_CNT) {
7558                 dev_err(&tp->pdev->dev,
7559                         "%s timed out, TX_MODE_ENABLE will not clear; "
7560                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7561                 err |= -ENODEV;
7562         }
7563
7564         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7565         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7566         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7567
7568         tw32(FTQ_RESET, 0xffffffff);
7569         tw32(FTQ_RESET, 0x00000000);
7570
7571         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7572         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7573
7574         for (i = 0; i < tp->irq_cnt; i++) {
7575                 struct tg3_napi *tnapi = &tp->napi[i];
7576                 if (tnapi->hw_status)
7577                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7578         }
7579
7580         return err;
7581 }
7582
7583 /* Save PCI command register before chip reset */
7584 static void tg3_save_pci_state(struct tg3 *tp)
7585 {
7586         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7587 }
7588
7589 /* Restore PCI state after chip reset */
7590 static void tg3_restore_pci_state(struct tg3 *tp)
7591 {
7592         u32 val;
7593
7594         /* Re-enable indirect register accesses. */
7595         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7596                                tp->misc_host_ctrl);
7597
7598         /* Set MAX PCI retry to zero. */
7599         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7600         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7601             tg3_flag(tp, PCIX_MODE))
7602                 val |= PCISTATE_RETRY_SAME_DMA;
7603         /* Allow reads and writes to the APE register and memory space. */
7604         if (tg3_flag(tp, ENABLE_APE))
7605                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7606                        PCISTATE_ALLOW_APE_SHMEM_WR |
7607                        PCISTATE_ALLOW_APE_PSPACE_WR;
7608         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7609
7610         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7611
7612         if (!tg3_flag(tp, PCI_EXPRESS)) {
7613                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7614                                       tp->pci_cacheline_sz);
7615                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7616                                       tp->pci_lat_timer);
7617         }
7618
7619         /* Make sure PCI-X relaxed ordering bit is clear. */
7620         if (tg3_flag(tp, PCIX_MODE)) {
7621                 u16 pcix_cmd;
7622
7623                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7624                                      &pcix_cmd);
7625                 pcix_cmd &= ~PCI_X_CMD_ERO;
7626                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7627                                       pcix_cmd);
7628         }
7629
7630         if (tg3_flag(tp, 5780_CLASS)) {
7631
7632                 /* Chip reset on 5780 will reset MSI enable bit,
7633                  * so need to restore it.
7634                  */
7635                 if (tg3_flag(tp, USING_MSI)) {
7636                         u16 ctrl;
7637
7638                         pci_read_config_word(tp->pdev,
7639                                              tp->msi_cap + PCI_MSI_FLAGS,
7640                                              &ctrl);
7641                         pci_write_config_word(tp->pdev,
7642                                               tp->msi_cap + PCI_MSI_FLAGS,
7643                                               ctrl | PCI_MSI_FLAGS_ENABLE);
7644                         val = tr32(MSGINT_MODE);
7645                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7646                 }
7647         }
7648 }
7649
7650 /* tp->lock is held. */
7651 static int tg3_chip_reset(struct tg3 *tp)
7652 {
7653         u32 val;
7654         void (*write_op)(struct tg3 *, u32, u32);
7655         int i, err;
7656
7657         tg3_nvram_lock(tp);
7658
7659         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7660
7661         /* No matching tg3_nvram_unlock() after this because
7662          * chip reset below will undo the nvram lock.
7663          */
7664         tp->nvram_lock_cnt = 0;
7665
7666         /* GRC_MISC_CFG core clock reset will clear the memory
7667          * enable bit in PCI register 4 and the MSI enable bit
7668          * on some chips, so we save relevant registers here.
7669          */
7670         tg3_save_pci_state(tp);
7671
7672         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7673             tg3_flag(tp, 5755_PLUS))
7674                 tw32(GRC_FASTBOOT_PC, 0);
7675
7676         /*
7677          * We must avoid the readl() that normally takes place.
7678          * It locks machines, causes machine checks, and other
7679          * fun things.  So, temporarily disable the 5701
7680          * hardware workaround while we do the reset.
7681          */
7682         write_op = tp->write32;
7683         if (write_op == tg3_write_flush_reg32)
7684                 tp->write32 = tg3_write32;
7685
7686         /* Prevent the irq handler from reading or writing PCI registers
7687          * during chip reset when the memory enable bit in the PCI command
7688          * register may be cleared.  The chip does not generate interrupts
7689          * at this time, but the irq handler may still be called due to irq
7690          * sharing or irqpoll.
7691          */
7692         tg3_flag_set(tp, CHIP_RESETTING);
7693         for (i = 0; i < tp->irq_cnt; i++) {
7694                 struct tg3_napi *tnapi = &tp->napi[i];
7695                 if (tnapi->hw_status) {
7696                         tnapi->hw_status->status = 0;
7697                         tnapi->hw_status->status_tag = 0;
7698                 }
7699                 tnapi->last_tag = 0;
7700                 tnapi->last_irq_tag = 0;
7701         }
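             /* Make the cleared tags visible before waiting for in-flight
              * handlers to finish on the other CPUs.
              */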
7702         smp_mb();
7703
7704         for (i = 0; i < tp->irq_cnt; i++)
7705                 synchronize_irq(tp->napi[i].irq_vec);
7706
7707         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7708                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7709                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7710         }
7711
7712         /* do the reset */
7713         val = GRC_MISC_CFG_CORECLK_RESET;
7714
7715         if (tg3_flag(tp, PCI_EXPRESS)) {
7716                 /* Force PCIe 1.0a mode */
7717                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7718                     !tg3_flag(tp, 57765_PLUS) &&
7719                     tr32(TG3_PCIE_PHY_TSTCTL) ==
7720                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7721                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7722
7723                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7724                         tw32(GRC_MISC_CFG, (1 << 29));
7725                         val |= (1 << 29);
7726                 }
7727         }
7728
7729         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7730                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7731                 tw32(GRC_VCPU_EXT_CTRL,
7732                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7733         }
7734
7735         /* Manage gphy power for all CPMU-absent PCIe devices. */
7736         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7737                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7738
7739         tw32(GRC_MISC_CFG, val);
7740
7741         /* restore 5701 hardware bug workaround write method */
7742         tp->write32 = write_op;
7743
7744         /* Unfortunately, we have to delay before the PCI read back.
7745          * Some 575X chips will not even respond to a PCI cfg access
7746          * when the reset command is given to the chip.
7747          *
7748          * How do these hardware designers expect things to work
7749          * properly if the PCI write is posted for a long period
7750          * of time?  It is always necessary to have some method by
7751          * which a register read back can occur to push out the
7752          * write that performs the reset.
7753          *
7754          * For most tg3 variants the trick below has worked.
7755          * Ho hum...
7756          */
7757         udelay(120);
7758
7759         /* Flush PCI posted writes.  The normal MMIO registers
7760          * are inaccessible at this time, so this is the only
7761          * way to do this reliably (actually, this is no longer
7762          * the case, see above).  I tried to use indirect
7763          * register read/write but this upset some 5701 variants.
7764          */
7765         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7766
7767         udelay(120);
7768
7769         if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7770                 u16 val16;
7771
7772                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7773                         int i;
7774                         u32 cfg_val;
7775
7776                         /* Wait for link training to complete.  */
7777                         for (i = 0; i < 5000; i++)
7778                                 udelay(100);
7779
7780                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7781                         pci_write_config_dword(tp->pdev, 0xc4,
7782                                                cfg_val | (1 << 15));
7783                 }
7784
7785                 /* Clear the "no snoop" and "relaxed ordering" bits. */
7786                 pci_read_config_word(tp->pdev,
7787                                      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7788                                      &val16);
7789                 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7790                            PCI_EXP_DEVCTL_NOSNOOP_EN);
7791                 /*
7792                  * Older PCIe devices only support the 128 byte
7793                  * MPS setting.  Enforce the restriction.
7794                  */
7795                 if (!tg3_flag(tp, CPMU_PRESENT))
7796                         val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7797                 pci_write_config_word(tp->pdev,
7798                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7799                                       val16);
7800
7801                 /* Clear error status */
7802                 pci_write_config_word(tp->pdev,
7803                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7804                                       PCI_EXP_DEVSTA_CED |
7805                                       PCI_EXP_DEVSTA_NFED |
7806                                       PCI_EXP_DEVSTA_FED |
7807                                       PCI_EXP_DEVSTA_URD);
7808         }
7809
7810         tg3_restore_pci_state(tp);
7811
7812         tg3_flag_clear(tp, CHIP_RESETTING);
7813         tg3_flag_clear(tp, ERROR_PROCESSED);
7814
7815         val = 0;
7816         if (tg3_flag(tp, 5780_CLASS))
7817                 val = tr32(MEMARB_MODE);
7818         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7819
7820         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7821                 tg3_stop_fw(tp);
7822                 tw32(0x5000, 0x400);
7823         }
7824
7825         tw32(GRC_MODE, tp->grc_mode);
7826
7827         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7828                 val = tr32(0xc4);
7829
7830                 tw32(0xc4, val | (1 << 15));
7831         }
7832
7833         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7834             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7835                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7836                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7837                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7838                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7839         }
7840
7841         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7842                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7843                 val = tp->mac_mode;
7844         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7845                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7846                 val = tp->mac_mode;
7847         } else
7848                 val = 0;
7849
7850         tw32_f(MAC_MODE, val);
7851         udelay(40);
7852
7853         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7854
7855         err = tg3_poll_fw(tp);
7856         if (err)
7857                 return err;
7858
7859         tg3_mdio_start(tp);
7860
7861         if (tg3_flag(tp, PCI_EXPRESS) &&
7862             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7863             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7864             !tg3_flag(tp, 57765_PLUS)) {
7865                 val = tr32(0x7c00);
7866
7867                 tw32(0x7c00, val | (1 << 25));
7868         }
7869
7870         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7871                 val = tr32(TG3_CPMU_CLCK_ORIDE);
7872                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7873         }
7874
7875         /* Reprobe ASF enable state.  */
7876         tg3_flag_clear(tp, ENABLE_ASF);
7877         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7878         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7879         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7880                 u32 nic_cfg;
7881
7882                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7883                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7884                         tg3_flag_set(tp, ENABLE_ASF);
7885                         tp->last_event_jiffies = jiffies;
7886                         if (tg3_flag(tp, 5750_PLUS))
7887                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7888                 }
7889         }
7890
7891         return 0;
7892 }
7893
7894 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
7895                                                  struct rtnl_link_stats64 *);
7896 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *,
7897                                                 struct tg3_ethtool_stats *);
7898
7899 /* tp->lock is held. */
7900 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7901 {
7902         int err;
7903
7904         tg3_stop_fw(tp);
7905
7906         tg3_write_sig_pre_reset(tp, kind);
7907
7908         tg3_abort_hw(tp, silent);
7909         err = tg3_chip_reset(tp);
7910
7911         __tg3_set_mac_addr(tp, 0);
7912
7913         tg3_write_sig_legacy(tp, kind);
7914         tg3_write_sig_post_reset(tp, kind);
7915
7916         if (tp->hw_stats) {
7917                 /* Save the stats across chip resets... */
7918                 tg3_get_stats64(tp->dev, &tp->net_stats_prev);
7919                 tg3_get_estats(tp, &tp->estats_prev);
7920
7921                 /* And make sure the next sample is new data */
7922                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7923         }
7924
7925         if (err)
7926                 return err;
7927
7928         return 0;
7929 }
7930
7931 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7932 {
7933         struct tg3 *tp = netdev_priv(dev);
7934         struct sockaddr *addr = p;
7935         int err = 0, skip_mac_1 = 0;
7936
7937         if (!is_valid_ether_addr(addr->sa_data))
7938                 return -EINVAL;
7939
7940         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7941
7942         if (!netif_running(dev))
7943                 return 0;
7944
7945         if (tg3_flag(tp, ENABLE_ASF)) {
7946                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7947
7948                 addr0_high = tr32(MAC_ADDR_0_HIGH);
7949                 addr0_low = tr32(MAC_ADDR_0_LOW);
7950                 addr1_high = tr32(MAC_ADDR_1_HIGH);
7951                 addr1_low = tr32(MAC_ADDR_1_LOW);
7952
7953                 /* Skip MAC addr 1 if ASF is using it. */
7954                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7955                     !(addr1_high == 0 && addr1_low == 0))
7956                         skip_mac_1 = 1;
7957         }
7958         spin_lock_bh(&tp->lock);
7959         __tg3_set_mac_addr(tp, skip_mac_1);
7960         spin_unlock_bh(&tp->lock);
7961
7962         return err;
7963 }
7964
7965 /* tp->lock is held. */
7966 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7967                            dma_addr_t mapping, u32 maxlen_flags,
7968                            u32 nic_addr)
7969 {
7970         tg3_write_mem(tp,
7971                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7972                       ((u64) mapping >> 32));
7973         tg3_write_mem(tp,
7974                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7975                       ((u64) mapping & 0xffffffff));
7976         tg3_write_mem(tp,
7977                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7978                        maxlen_flags);
7979
7980         if (!tg3_flag(tp, 5705_PLUS))
7981                 tg3_write_mem(tp,
7982                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7983                               nic_addr);
7984 }
7985
7986 static void __tg3_set_rx_mode(struct net_device *);
7987 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7988 {
7989         int i;
7990
7991         if (!tg3_flag(tp, ENABLE_TSS)) {
7992                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7993                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7994                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7995         } else {
7996                 tw32(HOSTCC_TXCOL_TICKS, 0);
7997                 tw32(HOSTCC_TXMAX_FRAMES, 0);
7998                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7999         }
8000
8001         if (!tg3_flag(tp, ENABLE_RSS)) {
8002                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8003                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8004                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8005         } else {
8006                 tw32(HOSTCC_RXCOL_TICKS, 0);
8007                 tw32(HOSTCC_RXMAX_FRAMES, 0);
8008                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8009         }
8010
8011         if (!tg3_flag(tp, 5705_PLUS)) {
8012                 u32 val = ec->stats_block_coalesce_usecs;
8013
8014                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8015                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8016
8017                 if (!netif_carrier_ok(tp->dev))
8018                         val = 0;
8019
8020                 tw32(HOSTCC_STAT_COAL_TICKS, val);
8021         }
8022
8023         for (i = 0; i < tp->irq_cnt - 1; i++) {
8024                 u32 reg;
8025
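                     /* Per-vector host coalescing registers are laid out in
                      * 0x18-byte blocks starting at the _VEC1 offsets.
                      */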
8026                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8027                 tw32(reg, ec->rx_coalesce_usecs);
8028                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8029                 tw32(reg, ec->rx_max_coalesced_frames);
8030                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8031                 tw32(reg, ec->rx_max_coalesced_frames_irq);
8032
8033                 if (tg3_flag(tp, ENABLE_TSS)) {
8034                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8035                         tw32(reg, ec->tx_coalesce_usecs);
8036                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8037                         tw32(reg, ec->tx_max_coalesced_frames);
8038                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8039                         tw32(reg, ec->tx_max_coalesced_frames_irq);
8040                 }
8041         }
8042
8043         for (; i < tp->irq_max - 1; i++) {
8044                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8045                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8046                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8047
8048                 if (tg3_flag(tp, ENABLE_TSS)) {
8049                         tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8050                         tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8051                         tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8052                 }
8053         }
8054 }
8055
8056 /* tp->lock is held. */
8057 static void tg3_rings_reset(struct tg3 *tp)
8058 {
8059         int i;
8060         u32 stblk, txrcb, rxrcb, limit;
8061         struct tg3_napi *tnapi = &tp->napi[0];
8062
8063         /* Disable all transmit rings but the first. */
8064         if (!tg3_flag(tp, 5705_PLUS))
8065                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8066         else if (tg3_flag(tp, 5717_PLUS))
8067                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8068         else if (tg3_flag(tp, 57765_CLASS))
8069                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8070         else
8071                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8072
8073         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8074              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8075                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8076                               BDINFO_FLAGS_DISABLED);
8077
8078
8079         /* Disable all receive return rings but the first. */
8080         if (tg3_flag(tp, 5717_PLUS))
8081                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8082         else if (!tg3_flag(tp, 5705_PLUS))
8083                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8084         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8085                  tg3_flag(tp, 57765_CLASS))
8086                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8087         else
8088                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8089
8090         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8091              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8092                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8093                               BDINFO_FLAGS_DISABLED);
8094
8095         /* Disable interrupts */
8096         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8097         tp->napi[0].chk_msi_cnt = 0;
8098         tp->napi[0].last_rx_cons = 0;
8099         tp->napi[0].last_tx_cons = 0;
8100
8101         /* Zero mailbox registers. */
8102         if (tg3_flag(tp, SUPPORT_MSIX)) {
8103                 for (i = 1; i < tp->irq_max; i++) {
8104                         tp->napi[i].tx_prod = 0;
8105                         tp->napi[i].tx_cons = 0;
8106                         if (tg3_flag(tp, ENABLE_TSS))
8107                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
8108                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
8109                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8110                         tp->napi[i].chk_msi_cnt = 0;
8111                         tp->napi[i].last_rx_cons = 0;
8112                         tp->napi[i].last_tx_cons = 0;
8113                 }
8114                 if (!tg3_flag(tp, ENABLE_TSS))
8115                         tw32_mailbox(tp->napi[0].prodmbox, 0);
8116         } else {
8117                 tp->napi[0].tx_prod = 0;
8118                 tp->napi[0].tx_cons = 0;
8119                 tw32_mailbox(tp->napi[0].prodmbox, 0);
8120                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8121         }
8122
8123         /* Make sure the NIC-based send BD rings are disabled. */
8124         if (!tg3_flag(tp, 5705_PLUS)) {
8125                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8126                 for (i = 0; i < 16; i++)
8127                         tw32_tx_mbox(mbox + i * 8, 0);
8128         }
8129
8130         txrcb = NIC_SRAM_SEND_RCB;
8131         rxrcb = NIC_SRAM_RCV_RET_RCB;
8132
8133         /* Clear status block in ram. */
8134         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8135
8136         /* Set status block DMA address */
8137         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8138              ((u64) tnapi->status_mapping >> 32));
8139         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8140              ((u64) tnapi->status_mapping & 0xffffffff));
8141
8142         if (tnapi->tx_ring) {
8143                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8144                                (TG3_TX_RING_SIZE <<
8145                                 BDINFO_FLAGS_MAXLEN_SHIFT),
8146                                NIC_SRAM_TX_BUFFER_DESC);
8147                 txrcb += TG3_BDINFO_SIZE;
8148         }
8149
8150         if (tnapi->rx_rcb) {
8151                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8152                                (tp->rx_ret_ring_mask + 1) <<
8153                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8154                 rxrcb += TG3_BDINFO_SIZE;
8155         }
8156
8157         stblk = HOSTCC_STATBLCK_RING1;
8158
8159         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8160                 u64 mapping = (u64)tnapi->status_mapping;
8161                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8162                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8163
8164                 /* Clear status block in ram. */
8165                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8166
8167                 if (tnapi->tx_ring) {
8168                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8169                                        (TG3_TX_RING_SIZE <<
8170                                         BDINFO_FLAGS_MAXLEN_SHIFT),
8171                                        NIC_SRAM_TX_BUFFER_DESC);
8172                         txrcb += TG3_BDINFO_SIZE;
8173                 }
8174
8175                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8176                                ((tp->rx_ret_ring_mask + 1) <<
8177                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8178
8179                 stblk += 8;
8180                 rxrcb += TG3_BDINFO_SIZE;
8181         }
8182 }
8183
8184 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8185 {
8186         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8187
8188         if (!tg3_flag(tp, 5750_PLUS) ||
8189             tg3_flag(tp, 5780_CLASS) ||
8190             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8191             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8192             tg3_flag(tp, 57765_PLUS))
8193                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8194         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8195                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8196                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8197         else
8198                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8199
8200         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8201         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
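             /* The standard-ring threshold is the smallest of half the
              * on-chip BD cache, the max post count, and one eighth of the
              * host ring (with a floor of one buffer); e.g. a 200-entry
              * host ring contributes 25 here.
              */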
8202
8203         val = min(nic_rep_thresh, host_rep_thresh);
8204         tw32(RCVBDI_STD_THRESH, val);
8205
8206         if (tg3_flag(tp, 57765_PLUS))
8207                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8208
8209         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8210                 return;
8211
8212         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8213
8214         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8215
8216         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8217         tw32(RCVBDI_JUMBO_THRESH, val);
8218
8219         if (tg3_flag(tp, 57765_PLUS))
8220                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8221 }
8222
8223 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
8224 {
8225         int i;
8226
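             /* ethtool_rxfh_indir_default(i, n) is simply i % n, so this
              * spreads the table entries round-robin across the rx rings.
              * Vector 0 is excluded because it carries no rx traffic when
              * RSS is enabled.
              */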
8227         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8228                 tp->rss_ind_tbl[i] =
8229                         ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
8230 }
8231
8232 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8233 {
8234         int i;
8235
8236         if (!tg3_flag(tp, SUPPORT_MSIX))
8237                 return;
8238
8239         if (tp->irq_cnt <= 2) {
8240                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8241                 return;
8242         }
8243
8244         /* Validate table against current IRQ count */
8245         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8246                 if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
8247                         break;
8248         }
8249
8250         if (i != TG3_RSS_INDIR_TBL_SIZE)
8251                 tg3_rss_init_dflt_indir_tbl(tp);
8252 }
8253
8254 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8255 {
8256         int i = 0;
8257         u32 reg = MAC_RSS_INDIR_TBL_0;
8258
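             /* Pack eight 4-bit table entries into each 32-bit register,
              * first entry in the most significant nibble; e.g. entries
              * 1,2,3,0,1,2,3,0 would be written as 0x12301230.
              */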
8259         while (i < TG3_RSS_INDIR_TBL_SIZE) {
8260                 u32 val = tp->rss_ind_tbl[i];
8261                 i++;
8262                 for (; i % 8; i++) {
8263                         val <<= 4;
8264                         val |= tp->rss_ind_tbl[i];
8265                 }
8266                 tw32(reg, val);
8267                 reg += 4;
8268         }
8269 }
8270
8271 /* tp->lock is held. */
8272 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8273 {
8274         u32 val, rdmac_mode;
8275         int i, err, limit;
8276         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8277
8278         tg3_disable_ints(tp);
8279
8280         tg3_stop_fw(tp);
8281
8282         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8283
8284         if (tg3_flag(tp, INIT_COMPLETE))
8285                 tg3_abort_hw(tp, 1);
8286
8287         /* Enable MAC control of LPI */
8288         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8289                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8290                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8291                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
8292
8293                 tw32_f(TG3_CPMU_EEE_CTRL,
8294                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8295
8296                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8297                       TG3_CPMU_EEEMD_LPI_IN_TX |
8298                       TG3_CPMU_EEEMD_LPI_IN_RX |
8299                       TG3_CPMU_EEEMD_EEE_ENABLE;
8300
8301                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8302                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8303
8304                 if (tg3_flag(tp, ENABLE_APE))
8305                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8306
8307                 tw32_f(TG3_CPMU_EEE_MODE, val);
8308
8309                 tw32_f(TG3_CPMU_EEE_DBTMR1,
8310                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8311                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8312
8313                 tw32_f(TG3_CPMU_EEE_DBTMR2,
8314                        TG3_CPMU_DBTMR2_APE_TX_2047US |
8315                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8316         }
8317
8318         if (reset_phy)
8319                 tg3_phy_reset(tp);
8320
8321         err = tg3_chip_reset(tp);
8322         if (err)
8323                 return err;
8324
8325         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8326
8327         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8328                 val = tr32(TG3_CPMU_CTRL);
8329                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8330                 tw32(TG3_CPMU_CTRL, val);
8331
8332                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8333                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8334                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8335                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8336
8337                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8338                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8339                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8340                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8341
8342                 val = tr32(TG3_CPMU_HST_ACC);
8343                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8344                 val |= CPMU_HST_ACC_MACCLK_6_25;
8345                 tw32(TG3_CPMU_HST_ACC, val);
8346         }
8347
8348         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8349                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8350                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8351                        PCIE_PWR_MGMT_L1_THRESH_4MS;
8352                 tw32(PCIE_PWR_MGMT_THRESH, val);
8353
8354                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8355                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8356
8357                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8358
8359                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8360                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8361         }
8362
8363         if (tg3_flag(tp, L1PLLPD_EN)) {
8364                 u32 grc_mode = tr32(GRC_MODE);
8365
8366                 /* Access the lower 1K of PL PCIE block registers. */
8367                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8368                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8369
8370                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8371                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8372                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8373
8374                 tw32(GRC_MODE, grc_mode);
8375         }
8376
8377         if (tg3_flag(tp, 57765_CLASS)) {
8378                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8379                         u32 grc_mode = tr32(GRC_MODE);
8380
8381                         /* Access the lower 1K of PL PCIE block registers. */
8382                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8383                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8384
8385                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8386                                    TG3_PCIE_PL_LO_PHYCTL5);
8387                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8388                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8389
8390                         tw32(GRC_MODE, grc_mode);
8391                 }
8392
8393                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8394                         u32 grc_mode = tr32(GRC_MODE);
8395
8396                         /* Access the lower 1K of DL PCIE block registers. */
8397                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8398                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8399
8400                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8401                                    TG3_PCIE_DL_LO_FTSMAX);
8402                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8403                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8404                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8405
8406                         tw32(GRC_MODE, grc_mode);
8407                 }
8408
8409                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8410                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8411                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8412                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8413         }
8414
8415         /* This works around an issue with Athlon chipsets on
8416          * B3 tigon3 silicon.  This bit has no effect on any
8417          * other revision.  But do not set this on PCI Express
8418          * chips and don't even touch the clocks if the CPMU is present.
8419          */
8420         if (!tg3_flag(tp, CPMU_PRESENT)) {
8421                 if (!tg3_flag(tp, PCI_EXPRESS))
8422                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8423                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8424         }
8425
8426         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8427             tg3_flag(tp, PCIX_MODE)) {
8428                 val = tr32(TG3PCI_PCISTATE);
8429                 val |= PCISTATE_RETRY_SAME_DMA;
8430                 tw32(TG3PCI_PCISTATE, val);
8431         }
8432
8433         if (tg3_flag(tp, ENABLE_APE)) {
8434                 /* Allow reads and writes to the
8435                  * APE register and memory space.
8436                  */
8437                 val = tr32(TG3PCI_PCISTATE);
8438                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8439                        PCISTATE_ALLOW_APE_SHMEM_WR |
8440                        PCISTATE_ALLOW_APE_PSPACE_WR;
8441                 tw32(TG3PCI_PCISTATE, val);
8442         }
8443
8444         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8445                 /* Enable some hw fixes.  */
8446                 val = tr32(TG3PCI_MSI_DATA);
8447                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8448                 tw32(TG3PCI_MSI_DATA, val);
8449         }
8450
8451         /* Descriptor ring init may make accesses to the
8452          * NIC SRAM area to set up the TX descriptors, so we
8453          * can only do this after the hardware has been
8454          * successfully reset.
8455          */
8456         err = tg3_init_rings(tp);
8457         if (err)
8458                 return err;
8459
8460         if (tg3_flag(tp, 57765_PLUS)) {
8461                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8462                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8463                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8464                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8465                 if (!tg3_flag(tp, 57765_CLASS) &&
8466                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8467                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8468                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8469         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8470                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8471                 /* This value is determined during the probe-time DMA
8472                  * engine test, tg3_test_dma.
8473                  */
8474                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8475         }
8476
8477         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8478                           GRC_MODE_4X_NIC_SEND_RINGS |
8479                           GRC_MODE_NO_TX_PHDR_CSUM |
8480                           GRC_MODE_NO_RX_PHDR_CSUM);
8481         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8482
8483         /* Pseudo-header checksum is done by hardware logic and not
8484          * the offload processors, so make the chip do the pseudo-
8485          * header checksums on receive.  For transmit it is more
8486          * convenient to do the pseudo-header checksum in software
8487          * as Linux does that on transmit for us in all cases.
8488          */
8489         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8490
8491         tw32(GRC_MODE,
8492              tp->grc_mode |
8493              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8494
8495         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
8496         val = tr32(GRC_MISC_CFG);
8497         val &= ~0xff;
8498         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8499         tw32(GRC_MISC_CFG, val);
8500
8501         /* Initialize MBUF/DESC pool. */
8502         if (tg3_flag(tp, 5750_PLUS)) {
8503                 /* Do nothing.  */
8504         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8505                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8506                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8507                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8508                 else
8509                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8510                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8511                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8512         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8513                 int fw_len;
8514
8515                 fw_len = tp->fw_len;
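                     /* Round the firmware length up to a 128-byte
                      * boundary; the mbuf pool sits in SRAM just
                      * above the firmware image.
                      */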
8516                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8517                 tw32(BUFMGR_MB_POOL_ADDR,
8518                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8519                 tw32(BUFMGR_MB_POOL_SIZE,
8520                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8521         }
8522
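             /* Program the buffer manager watermarks; jumbo MTUs use
              * the larger thresholds from bufmgr_config.
              */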
8523         if (tp->dev->mtu <= ETH_DATA_LEN) {
8524                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8525                      tp->bufmgr_config.mbuf_read_dma_low_water);
8526                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8527                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8528                 tw32(BUFMGR_MB_HIGH_WATER,
8529                      tp->bufmgr_config.mbuf_high_water);
8530         } else {
8531                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8532                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8533                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8534                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8535                 tw32(BUFMGR_MB_HIGH_WATER,
8536                      tp->bufmgr_config.mbuf_high_water_jumbo);
8537         }
8538         tw32(BUFMGR_DMA_LOW_WATER,
8539              tp->bufmgr_config.dma_low_water);
8540         tw32(BUFMGR_DMA_HIGH_WATER,
8541              tp->bufmgr_config.dma_high_water);
8542
8543         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8544         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8545                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8546         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8547             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8548             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8549                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8550         tw32(BUFMGR_MODE, val);
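             /* Poll for up to 20ms (2000 * 10us) for the buffer manager
              * to report itself enabled.
              */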
8551         for (i = 0; i < 2000; i++) {
8552                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8553                         break;
8554                 udelay(10);
8555         }
8556         if (i >= 2000) {
8557                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8558                 return -ENODEV;
8559         }
8560
8561         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8562                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8563
8564         tg3_setup_rxbd_thresholds(tp);
8565
8566         /* Initialize TG3_BDINFO's at:
8567          *  RCVDBDI_STD_BD:     standard eth size rx ring
8568          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8569          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8570          *
8571          * like so:
8572          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8573          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8574          *                              ring attribute flags
8575          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
8576          *
8577          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8578          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8579          *
8580          * The size of each ring is fixed in the firmware, but the location is
8581          * configurable.
8582          */
8583         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8584              ((u64) tpr->rx_std_mapping >> 32));
8585         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8586              ((u64) tpr->rx_std_mapping & 0xffffffff));
8587         if (!tg3_flag(tp, 5717_PLUS))
8588                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8589                      NIC_SRAM_RX_BUFFER_DESC);
8590
8591         /* Disable the mini ring */
8592         if (!tg3_flag(tp, 5705_PLUS))
8593                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8594                      BDINFO_FLAGS_DISABLED);
8595
8596         /* Program the jumbo buffer descriptor ring control
8597          * blocks on those devices that have them.
8598          */
8599         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8600             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8601
8602                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8603                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8604                              ((u64) tpr->rx_jmb_mapping >> 32));
8605                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8606                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8607                         val = TG3_RX_JMB_RING_SIZE(tp) <<
8608                               BDINFO_FLAGS_MAXLEN_SHIFT;
8609                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8610                              val | BDINFO_FLAGS_USE_EXT_RECV);
8611                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8612                             tg3_flag(tp, 57765_CLASS))
8613                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8614                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8615                 } else {
8616                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8617                              BDINFO_FLAGS_DISABLED);
8618                 }
8619
8620                 if (tg3_flag(tp, 57765_PLUS)) {
8621                         val = TG3_RX_STD_RING_SIZE(tp);
8622                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8623                         val |= (TG3_RX_STD_DMA_SZ << 2);
8624                 } else
8625                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8626         } else
8627                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8628
8629         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8630
8631         tpr->rx_std_prod_idx = tp->rx_pending;
8632         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8633
8634         tpr->rx_jmb_prod_idx =
8635                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8636         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8637
8638         tg3_rings_reset(tp);
8639
8640         /* Initialize MAC address and backoff seed. */
8641         __tg3_set_mac_addr(tp, 0);
8642
8643         /* MTU + ethernet header + FCS + optional VLAN tag */
8644         tw32(MAC_RX_MTU_SIZE,
8645              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8646
8647         /* The slot time is changed by tg3_setup_phy if we
8648          * run at gigabit with half duplex.
8649          */
8650         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8651               (6 << TX_LENGTHS_IPG_SHIFT) |
8652               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8653
8654         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8655                 val |= tr32(MAC_TX_LENGTHS) &
8656                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
8657                         TX_LENGTHS_CNT_DWN_VAL_MSK);
8658
8659         tw32(MAC_TX_LENGTHS, val);
8660
8661         /* Receive rules. */
8662         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8663         tw32(RCVLPC_CONFIG, 0x0181);
8664
8665         /* Calculate the RDMAC_MODE setting early; we need it to determine
8666          * the RCVLPC_STATS_ENABLE mask.
8667          */
8668         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8669                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8670                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8671                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8672                       RDMAC_MODE_LNGREAD_ENAB);
8673
8674         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8675                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8676
8677         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8678             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8679             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8680                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8681                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8682                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8683
8684         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8685             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8686                 if (tg3_flag(tp, TSO_CAPABLE) &&
8687                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8688                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8689                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8690                            !tg3_flag(tp, IS_5788)) {
8691                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8692                 }
8693         }
8694
8695         if (tg3_flag(tp, PCI_EXPRESS))
8696                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8697
8698         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
8699                 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
8700
8701         if (tg3_flag(tp, HW_TSO_1) ||
8702             tg3_flag(tp, HW_TSO_2) ||
8703             tg3_flag(tp, HW_TSO_3))
8704                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8705
8706         if (tg3_flag(tp, 57765_PLUS) ||
8707             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8708             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8709                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8710
8711         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8712                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8713
8714         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8715             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8716             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8717             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8718             tg3_flag(tp, 57765_PLUS)) {
8719                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8720                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8721                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8722                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8723                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8724                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8725                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8726                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8727                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8728                 }
8729                 tw32(TG3_RDMA_RSRVCTRL_REG,
8730                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8731         }
8732
8733         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8734             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8735                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8736                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8737                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8738                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8739         }
8740
8741         /* Receive/send statistics. */
8742         if (tg3_flag(tp, 5750_PLUS)) {
8743                 val = tr32(RCVLPC_STATS_ENABLE);
8744                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8745                 tw32(RCVLPC_STATS_ENABLE, val);
8746         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8747                    tg3_flag(tp, TSO_CAPABLE)) {
8748                 val = tr32(RCVLPC_STATS_ENABLE);
8749                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8750                 tw32(RCVLPC_STATS_ENABLE, val);
8751         } else {
8752                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8753         }
8754         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8755         tw32(SNDDATAI_STATSENAB, 0xffffff);
8756         tw32(SNDDATAI_STATSCTRL,
8757              (SNDDATAI_SCTRL_ENABLE |
8758               SNDDATAI_SCTRL_FASTUPD));
8759
8760         /* Set up the host coalescing engine. */
8761         tw32(HOSTCC_MODE, 0);
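             /* Wait up to 20ms for the coalescing engine to quiesce
              * before programming the new parameters.
              */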
8762         for (i = 0; i < 2000; i++) {
8763                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8764                         break;
8765                 udelay(10);
8766         }
8767
8768         __tg3_set_coalesce(tp, &tp->coal);
8769
8770         if (!tg3_flag(tp, 5705_PLUS)) {
8771                 /* Status/statistics block address.  See tg3_timer,
8772                  * the tg3_periodic_fetch_stats call there, and
8773                  * tg3_get_stats to see how this works for 5705/5750 chips.
8774                  */
8775                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8776                      ((u64) tp->stats_mapping >> 32));
8777                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8778                      ((u64) tp->stats_mapping & 0xffffffff));
8779                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8780
8781                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8782
8783                 /* Clear statistics and status block memory areas */
8784                 for (i = NIC_SRAM_STATS_BLK;
8785                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8786                      i += sizeof(u32)) {
8787                         tg3_write_mem(tp, i, 0);
8788                         udelay(40);
8789                 }
8790         }
8791
8792         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8793
8794         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8795         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8796         if (!tg3_flag(tp, 5705_PLUS))
8797                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8798
8799         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8800                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8801                 /* reset to prevent losing 1st rx packet intermittently */
8802                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8803                 udelay(10);
8804         }
8805
8806         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8807                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
8808                         MAC_MODE_FHDE_ENABLE;
8809         if (tg3_flag(tp, ENABLE_APE))
8810                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8811         if (!tg3_flag(tp, 5705_PLUS) &&
8812             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8813             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8814                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8815         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8816         udelay(40);
8817
8818         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8819          * If TG3_FLAG_IS_NIC is zero, we should read the
8820          * register to preserve the GPIO settings for LOMs. The GPIOs,
8821          * whether used as inputs or outputs, are set by boot code after
8822          * reset.
8823          */
8824         if (!tg3_flag(tp, IS_NIC)) {
8825                 u32 gpio_mask;
8826
8827                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8828                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8829                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8830
8831                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8832                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8833                                      GRC_LCLCTRL_GPIO_OUTPUT3;
8834
8835                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8836                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8837
8838                 tp->grc_local_ctrl &= ~gpio_mask;
8839                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8840
8841                 /* GPIO1 must be driven high for eeprom write protect */
8842                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8843                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8844                                                GRC_LCLCTRL_GPIO_OUTPUT1);
8845         }
8846         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8847         udelay(100);
8848
8849         if (tg3_flag(tp, USING_MSIX)) {
8850                 val = tr32(MSGINT_MODE);
8851                 val |= MSGINT_MODE_ENABLE;
8852                 if (tp->irq_cnt > 1)
8853                         val |= MSGINT_MODE_MULTIVEC_EN;
8854                 if (!tg3_flag(tp, 1SHOT_MSI))
8855                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
8856                 tw32(MSGINT_MODE, val);
8857         }
8858
8859         if (!tg3_flag(tp, 5705_PLUS)) {
8860                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8861                 udelay(40);
8862         }
8863
8864         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8865                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8866                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8867                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8868                WDMAC_MODE_LNGREAD_ENAB);
8869
8870         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8871             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8872                 if (tg3_flag(tp, TSO_CAPABLE) &&
8873                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8874                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8875                         /* nothing */
8876                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8877                            !tg3_flag(tp, IS_5788)) {
8878                         val |= WDMAC_MODE_RX_ACCEL;
8879                 }
8880         }
8881
8882         /* Enable host coalescing bug fix */
8883         if (tg3_flag(tp, 5755_PLUS))
8884                 val |= WDMAC_MODE_STATUS_TAG_FIX;
8885
8886         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8887                 val |= WDMAC_MODE_BURST_ALL_DATA;
8888
8889         tw32_f(WDMAC_MODE, val);
8890         udelay(40);
8891
8892         if (tg3_flag(tp, PCIX_MODE)) {
8893                 u16 pcix_cmd;
8894
8895                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8896                                      &pcix_cmd);
8897                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8898                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8899                         pcix_cmd |= PCI_X_CMD_READ_2K;
8900                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8901                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8902                         pcix_cmd |= PCI_X_CMD_READ_2K;
8903                 }
8904                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8905                                       pcix_cmd);
8906         }
8907
8908         tw32_f(RDMAC_MODE, rdmac_mode);
8909         udelay(40);
8910
8911         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8912         if (!tg3_flag(tp, 5705_PLUS))
8913                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8914
8915         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8916                 tw32(SNDDATAC_MODE,
8917                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8918         else
8919                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8920
8921         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8922         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8923         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8924         if (tg3_flag(tp, LRG_PROD_RING_CAP))
8925                 val |= RCVDBDI_MODE_LRG_RING_SZ;
8926         tw32(RCVDBDI_MODE, val);
8927         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8928         if (tg3_flag(tp, HW_TSO_1) ||
8929             tg3_flag(tp, HW_TSO_2) ||
8930             tg3_flag(tp, HW_TSO_3))
8931                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8932         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8933         if (tg3_flag(tp, ENABLE_TSS))
8934                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8935         tw32(SNDBDI_MODE, val);
8936         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8937
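             /* Load the 5701 A0 workaround firmware and, if needed, the
              * TSO firmware before enabling the transmitter.
              */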
8938         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8939                 err = tg3_load_5701_a0_firmware_fix(tp);
8940                 if (err)
8941                         return err;
8942         }
8943
8944         if (tg3_flag(tp, TSO_CAPABLE)) {
8945                 err = tg3_load_tso_firmware(tp);
8946                 if (err)
8947                         return err;
8948         }
8949
8950         tp->tx_mode = TX_MODE_ENABLE;
8951
8952         if (tg3_flag(tp, 5755_PLUS) ||
8953             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8954                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8955
8956         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8957                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8958                 tp->tx_mode &= ~val;
8959                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8960         }
8961
8962         tw32_f(MAC_TX_MODE, tp->tx_mode);
8963         udelay(100);
8964
8965         if (tg3_flag(tp, ENABLE_RSS)) {
8966                 tg3_rss_write_indir_tbl(tp);
8967
8968                 /* Set up the "secret" hash key. */
8969                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8970                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8971                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8972                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8973                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8974                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8975                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8976                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8977                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8978                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8979         }
8980
8981         tp->rx_mode = RX_MODE_ENABLE;
8982         if (tg3_flag(tp, 5755_PLUS))
8983                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8984
8985         if (tg3_flag(tp, ENABLE_RSS))
8986                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8987                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
8988                                RX_MODE_RSS_IPV6_HASH_EN |
8989                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
8990                                RX_MODE_RSS_IPV4_HASH_EN |
8991                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
8992
8993         tw32_f(MAC_RX_MODE, tp->rx_mode);
8994         udelay(10);
8995
8996         tw32(MAC_LED_CTRL, tp->led_ctrl);
8997
8998         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8999         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9000                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9001                 udelay(10);
9002         }
9003         tw32_f(MAC_RX_MODE, tp->rx_mode);
9004         udelay(10);
9005
9006         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9007                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9008                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9009                         /* Set drive transmission level to 1.2V  */
9010                         /* only if the signal pre-emphasis bit is not set  */
9011                         val = tr32(MAC_SERDES_CFG);
9012                         val &= 0xfffff000;
9013                         val |= 0x880;
9014                         tw32(MAC_SERDES_CFG, val);
9015                 }
9016                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9017                         tw32(MAC_SERDES_CFG, 0x616000);
9018         }
9019
9020         /* Prevent chip from dropping frames when flow control
9021          * is enabled.
9022          */
9023         if (tg3_flag(tp, 57765_CLASS))
9024                 val = 1;
9025         else
9026                 val = 2;
9027         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9028
9029         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9030             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9031                 /* Use hardware link auto-negotiation */
9032                 tg3_flag_set(tp, HW_AUTONEG);
9033         }
9034
9035         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9036             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9037                 u32 tmp;
9038
9039                 tmp = tr32(SERDES_RX_CTRL);
9040                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9041                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9042                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9043                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9044         }
9045
9046         if (!tg3_flag(tp, USE_PHYLIB)) {
9047                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
9048                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9049                         tp->link_config.speed = tp->link_config.orig_speed;
9050                         tp->link_config.duplex = tp->link_config.orig_duplex;
9051                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
9052                 }
9053
9054                 err = tg3_setup_phy(tp, 0);
9055                 if (err)
9056                         return err;
9057
9058                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9059                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9060                         u32 tmp;
9061
9062                         /* Clear CRC stats. */
9063                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9064                                 tg3_writephy(tp, MII_TG3_TEST1,
9065                                              tmp | MII_TG3_TEST1_CRC_EN);
9066                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9067                         }
9068                 }
9069         }
9070
9071         __tg3_set_rx_mode(tp->dev);
9072
9073         /* Initialize receive rules. */
9074         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9075         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9076         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9077         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9078
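             /* Clear the unused receive rule slots.  5705+ (non-5780)
              * parts expose 8 slots, older parts 16, and ASF claims
              * the last four.
              */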
9079         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9080                 limit = 8;
9081         else
9082                 limit = 16;
9083         if (tg3_flag(tp, ENABLE_ASF))
9084                 limit -= 4;
9085         switch (limit) {
9086         case 16:
9087                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9088         case 15:
9089                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9090         case 14:
9091                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9092         case 13:
9093                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9094         case 12:
9095                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9096         case 11:
9097                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9098         case 10:
9099                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9100         case 9:
9101                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9102         case 8:
9103                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9104         case 7:
9105                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9106         case 6:
9107                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9108         case 5:
9109                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9110         case 4:
9111                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9112         case 3:
9113                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9114         case 2:
9115         case 1:
9116
9117         default:
9118                 break;
9119         }
9120
9121         if (tg3_flag(tp, ENABLE_APE))
9122                 /* Write our heartbeat update interval to APE. */
9123                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9124                                 APE_HOST_HEARTBEAT_INT_DISABLE);
9125
9126         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9127
9128         return 0;
9129 }
9130
9131 /* Called at device open time to get the chip ready for
9132  * packet processing.  Invoked with tp->lock held.
9133  */
9134 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9135 {
9136         tg3_switch_clocks(tp);
9137
9138         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9139
9140         return tg3_reset_hw(tp, reset_phy);
9141 }
9142
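     /* Accumulate a 32-bit hardware counter into a 64-bit (high/low)
      * driver statistic, carrying into the high word when the low
      * word wraps.
      */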
9143 #define TG3_STAT_ADD32(PSTAT, REG) \
9144 do {    u32 __val = tr32(REG); \
9145         (PSTAT)->low += __val; \
9146         if ((PSTAT)->low < __val) \
9147                 (PSTAT)->high += 1; \
9148 } while (0)
9149
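     /* Fold the MAC's 32-bit statistics registers into the 64-bit
      * driver stats block.  Only meaningful while the link is up.
      */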
9150 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9151 {
9152         struct tg3_hw_stats *sp = tp->hw_stats;
9153
9154         if (!netif_carrier_ok(tp->dev))
9155                 return;
9156
9157         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9158         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9159         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9160         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9161         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9162         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9163         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9164         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9165         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9166         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9167         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9168         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9169         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9170
9171         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9172         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9173         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9174         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9175         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9176         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9177         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9178         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9179         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9180         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9181         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9182         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9183         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9184         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9185
9186         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9187         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9188             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9189             tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9190                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9191         } else {
9192                 u32 val = tr32(HOSTCC_FLOW_ATTN);
9193                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9194                 if (val) {
9195                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9196                         sp->rx_discards.low += val;
9197                         if (sp->rx_discards.low < val)
9198                                 sp->rx_discards.high += 1;
9199                 }
9200                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9201         }
9202         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9203 }
9204
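     /* Work around lost MSIs: if a vector has work pending but its
      * rx/tx consumer indices have not moved since the last check,
      * invoke the interrupt handler by hand.
      */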
9205 static void tg3_chk_missed_msi(struct tg3 *tp)
9206 {
9207         u32 i;
9208
9209         for (i = 0; i < tp->irq_cnt; i++) {
9210                 struct tg3_napi *tnapi = &tp->napi[i];
9211
9212                 if (tg3_has_work(tnapi)) {
9213                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9214                             tnapi->last_tx_cons == tnapi->tx_cons) {
9215                                 if (tnapi->chk_msi_cnt < 1) {
9216                                         tnapi->chk_msi_cnt++;
9217                                         return;
9218                                 }
9219                                 tg3_msi(0, tnapi);
9220                         }
9221                 }
9222                 tnapi->chk_msi_cnt = 0;
9223                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9224                 tnapi->last_tx_cons = tnapi->tx_cons;
9225         }
9226 }
9227
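     /* Per-device timer.  Handles the non-tagged-status IRQ race,
      * fetches stats and polls the link once per second, and sends
      * the ASF heartbeat every two seconds.
      */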
9228 static void tg3_timer(unsigned long __opaque)
9229 {
9230         struct tg3 *tp = (struct tg3 *) __opaque;
9231
9232         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9233                 goto restart_timer;
9234
9235         spin_lock(&tp->lock);
9236
9237         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9238             tg3_flag(tp, 57765_CLASS))
9239                 tg3_chk_missed_msi(tp);
9240
9241         if (!tg3_flag(tp, TAGGED_STATUS)) {
9242                 /* All of this garbage is needed because, when using
9243                  * non-tagged IRQ status, the mailbox/status_block protocol
9244                  * the chip uses with the CPU is race prone.
9245                  */
9246                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9247                         tw32(GRC_LOCAL_CTRL,
9248                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9249                 } else {
9250                         tw32(HOSTCC_MODE, tp->coalesce_mode |
9251                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9252                 }
9253
9254                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9255                         spin_unlock(&tp->lock);
9256                         tg3_reset_task_schedule(tp);
9257                         goto restart_timer;
9258                 }
9259         }
9260
9261         /* This part only runs once per second. */
9262         if (!--tp->timer_counter) {
9263                 if (tg3_flag(tp, 5705_PLUS))
9264                         tg3_periodic_fetch_stats(tp);
9265
9266                 if (tp->setlpicnt && !--tp->setlpicnt)
9267                         tg3_phy_eee_enable(tp);
9268
9269                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9270                         u32 mac_stat;
9271                         int phy_event;
9272
9273                         mac_stat = tr32(MAC_STATUS);
9274
9275                         phy_event = 0;
9276                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9277                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9278                                         phy_event = 1;
9279                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9280                                 phy_event = 1;
9281
9282                         if (phy_event)
9283                                 tg3_setup_phy(tp, 0);
9284                 } else if (tg3_flag(tp, POLL_SERDES)) {
9285                         u32 mac_stat = tr32(MAC_STATUS);
9286                         int need_setup = 0;
9287
9288                         if (netif_carrier_ok(tp->dev) &&
9289                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9290                                 need_setup = 1;
9291                         }
9292                         if (!netif_carrier_ok(tp->dev) &&
9293                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
9294                                          MAC_STATUS_SIGNAL_DET))) {
9295                                 need_setup = 1;
9296                         }
9297                         if (need_setup) {
9298                                 if (!tp->serdes_counter) {
9299                                         tw32_f(MAC_MODE,
9300                                              (tp->mac_mode &
9301                                               ~MAC_MODE_PORT_MODE_MASK));
9302                                         udelay(40);
9303                                         tw32_f(MAC_MODE, tp->mac_mode);
9304                                         udelay(40);
9305                                 }
9306                                 tg3_setup_phy(tp, 0);
9307                         }
9308                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9309                            tg3_flag(tp, 5780_CLASS)) {
9310                         tg3_serdes_parallel_detect(tp);
9311                 }
9312
9313                 tp->timer_counter = tp->timer_multiplier;
9314         }
9315
9316         /* Heartbeat is only sent once every 2 seconds.
9317          *
9318          * The heartbeat is to tell the ASF firmware that the host
9319          * driver is still alive.  In the event that the OS crashes,
9320          * ASF needs to reset the hardware to free up the FIFO space
9321          * that may be filled with rx packets destined for the host.
9322          * If the FIFO is full, ASF will no longer function properly.
9323          *
9324          * Unintended resets have been reported on real time kernels
9325          * where the timer doesn't run on time.  Netpoll will also have
9326          * the same problem.
9327          *
9328          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9329          * to check the ring condition when the heartbeat is expiring
9330          * before doing the reset.  This will prevent most unintended
9331          * resets.
9332          */
9333         if (!--tp->asf_counter) {
9334                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9335                         tg3_wait_for_event_ack(tp);
9336
9337                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9338                                       FWCMD_NICDRV_ALIVE3);
9339                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9340                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9341                                       TG3_FW_UPDATE_TIMEOUT_SEC);
9342
9343                         tg3_generate_fw_event(tp);
9344                 }
9345                 tp->asf_counter = tp->asf_multiplier;
9346         }
9347
9348         spin_unlock(&tp->lock);
9349
9350 restart_timer:
9351         tp->timer.expires = jiffies + tp->timer_offset;
9352         add_timer(&tp->timer);
9353 }
9354
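     /* Pick the handler for this vector: the MSI/MSI-X handlers
      * (one-shot where supported), or the shared INTx handlers for
      * tagged or non-tagged status.
      */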
9355 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9356 {
9357         irq_handler_t fn;
9358         unsigned long flags;
9359         char *name;
9360         struct tg3_napi *tnapi = &tp->napi[irq_num];
9361
9362         if (tp->irq_cnt == 1)
9363                 name = tp->dev->name;
9364         else {
9365                 name = &tnapi->irq_lbl[0];
9366                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9367                 name[IFNAMSIZ-1] = 0;
9368         }
9369
9370         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9371                 fn = tg3_msi;
9372                 if (tg3_flag(tp, 1SHOT_MSI))
9373                         fn = tg3_msi_1shot;
9374                 flags = 0;
9375         } else {
9376                 fn = tg3_interrupt;
9377                 if (tg3_flag(tp, TAGGED_STATUS))
9378                         fn = tg3_interrupt_tagged;
9379                 flags = IRQF_SHARED;
9380         }
9381
9382         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9383 }
9384
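     /* Force an interrupt through the host coalescing engine and poll
      * for up to ~50ms to verify that it is actually delivered.
      */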
9385 static int tg3_test_interrupt(struct tg3 *tp)
9386 {
9387         struct tg3_napi *tnapi = &tp->napi[0];
9388         struct net_device *dev = tp->dev;
9389         int err, i, intr_ok = 0;
9390         u32 val;
9391
9392         if (!netif_running(dev))
9393                 return -ENODEV;
9394
9395         tg3_disable_ints(tp);
9396
9397         free_irq(tnapi->irq_vec, tnapi);
9398
9399         /*
9400          * Turn off MSI one shot mode.  Otherwise this test has no
9401          * observable way to know whether the interrupt was delivered.
9402          */
9403         if (tg3_flag(tp, 57765_PLUS)) {
9404                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9405                 tw32(MSGINT_MODE, val);
9406         }
9407
9408         err = request_irq(tnapi->irq_vec, tg3_test_isr,
9409                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9410         if (err)
9411                 return err;
9412
9413         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9414         tg3_enable_ints(tp);
9415
9416         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9417                tnapi->coal_now);
9418
9419         for (i = 0; i < 5; i++) {
9420                 u32 int_mbox, misc_host_ctrl;
9421
9422                 int_mbox = tr32_mailbox(tnapi->int_mbox);
9423                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9424
9425                 if ((int_mbox != 0) ||
9426                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9427                         intr_ok = 1;
9428                         break;
9429                 }
9430
9431                 if (tg3_flag(tp, 57765_PLUS) &&
9432                     tnapi->hw_status->status_tag != tnapi->last_tag)
9433                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9434
9435                 msleep(10);
9436         }
9437
9438         tg3_disable_ints(tp);
9439
9440         free_irq(tnapi->irq_vec, tnapi);
9441
9442         err = tg3_request_irq(tp, 0);
9443
9444         if (err)
9445                 return err;
9446
9447         if (intr_ok) {
9448                 /* Reenable MSI one shot mode. */
9449                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
9450                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9451                         tw32(MSGINT_MODE, val);
9452                 }
9453                 return 0;
9454         }
9455
9456         return -EIO;
9457 }
9458
9459 /* Returns 0 if the MSI test succeeds, or if the MSI test fails and
9460  * INTx mode is successfully restored.
9461  */
9462 static int tg3_test_msi(struct tg3 *tp)
9463 {
9464         int err;
9465         u16 pci_cmd;
9466
9467         if (!tg3_flag(tp, USING_MSI))
9468                 return 0;
9469
9470         /* Turn off SERR reporting in case MSI terminates with Master
9471          * Abort.
9472          */
9473         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9474         pci_write_config_word(tp->pdev, PCI_COMMAND,
9475                               pci_cmd & ~PCI_COMMAND_SERR);
9476
9477         err = tg3_test_interrupt(tp);
9478
9479         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9480
9481         if (!err)
9482                 return 0;
9483
9484         /* other failures */
9485         if (err != -EIO)
9486                 return err;
9487
9488         /* MSI test failed, go back to INTx mode */
9489         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9490                     "to INTx mode. Please report this failure to the PCI "
9491                     "maintainer and include system chipset information\n");
9492
9493         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9494
9495         pci_disable_msi(tp->pdev);
9496
9497         tg3_flag_clear(tp, USING_MSI);
9498         tp->napi[0].irq_vec = tp->pdev->irq;
9499
9500         err = tg3_request_irq(tp, 0);
9501         if (err)
9502                 return err;
9503
9504         /* Need to reset the chip because the MSI cycle may have terminated
9505          * with Master Abort.
9506          */
9507         tg3_full_lock(tp, 1);
9508
9509         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9510         err = tg3_init_hw(tp, 1);
9511
9512         tg3_full_unlock(tp);
9513
9514         if (err)
9515                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9516
9517         return err;
9518 }
9519
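     /* Fetch the external firmware image named by tp->fw_needed and
      * sanity check the length advertised in its header.
      */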
9520 static int tg3_request_firmware(struct tg3 *tp)
9521 {
9522         const __be32 *fw_data;
9523
9524         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9525                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9526                            tp->fw_needed);
9527                 return -ENOENT;
9528         }
9529
9530         fw_data = (void *)tp->fw->data;
9531
9532         /* Firmware blob starts with version numbers, followed by
9533          * start address and _full_ length including BSS sections
9534          * (which must be longer than the actual data, of course).
9535          */
9536
9537         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
9538         if (tp->fw_len < (tp->fw->size - 12)) {
9539                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9540                            tp->fw_len, tp->fw_needed);
9541                 release_firmware(tp->fw);
9542                 tp->fw = NULL;
9543                 return -EINVAL;
9544         }
9545
9546         /* We no longer need firmware; we have it. */
9547         tp->fw_needed = NULL;
9548         return 0;
9549 }
9550
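     /* Try to enable MSI-X with one rx vector per online CPU plus one
      * for link/misc interrupts.  Accepts a smaller grant from the
      * host; returns false if MSI-X cannot be used at all.
      */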
9551 static bool tg3_enable_msix(struct tg3 *tp)
9552 {
9553         int i, rc;
9554         struct msix_entry msix_ent[tp->irq_max];
9555
9556         tp->irq_cnt = num_online_cpus();
9557         if (tp->irq_cnt > 1) {
9558                 /* We want as many rx rings enabled as there are cpus.
9559                  * In multiqueue MSI-X mode, the first MSI-X vector
9560                  * only deals with link interrupts, etc, so we add
9561                  * one to the number of vectors we are requesting.
9562                  */
9563                 tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
9564         }
9565
9566         for (i = 0; i < tp->irq_max; i++) {
9567                 msix_ent[i].entry  = i;
9568                 msix_ent[i].vector = 0;
9569         }
9570
9571         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9572         if (rc < 0) {
9573                 return false;
9574         } else if (rc != 0) {
9575                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9576                         return false;
9577                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9578                               tp->irq_cnt, rc);
9579                 tp->irq_cnt = rc;
9580         }
9581
9582         for (i = 0; i < tp->irq_max; i++)
9583                 tp->napi[i].irq_vec = msix_ent[i].vector;
9584
9585         netif_set_real_num_tx_queues(tp->dev, 1);
9586         rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9587         if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9588                 pci_disable_msix(tp->pdev);
9589                 return false;
9590         }
9591
9592         if (tp->irq_cnt > 1) {
9593                 tg3_flag_set(tp, ENABLE_RSS);
9594
9595                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9596                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9597                         tg3_flag_set(tp, ENABLE_TSS);
9598                         netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9599                 }
9600         }
9601
9602         return true;
9603 }
9604
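     /* Choose the interrupt scheme, preferring MSI-X, then MSI, then
      * legacy INTx, and program MSGINT_MODE to match.
      */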
9605 static void tg3_ints_init(struct tg3 *tp)
9606 {
9607         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9608             !tg3_flag(tp, TAGGED_STATUS)) {
9609                 /* All MSI-supporting chips should support tagged
9610                  * status.  Assert that this is the case.
9611                  */
9612                 netdev_warn(tp->dev,
9613                             "MSI without TAGGED_STATUS? Not using MSI\n");
9614                 goto defcfg;
9615         }
9616
9617         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9618                 tg3_flag_set(tp, USING_MSIX);
9619         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9620                 tg3_flag_set(tp, USING_MSI);
9621
9622         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9623                 u32 msi_mode = tr32(MSGINT_MODE);
9624                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9625                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9626                 if (!tg3_flag(tp, 1SHOT_MSI))
9627                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
9628                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9629         }
9630 defcfg:
9631         if (!tg3_flag(tp, USING_MSIX)) {
9632                 tp->irq_cnt = 1;
9633                 tp->napi[0].irq_vec = tp->pdev->irq;
9634                 netif_set_real_num_tx_queues(tp->dev, 1);
9635                 netif_set_real_num_rx_queues(tp->dev, 1);
9636         }
9637 }
9638
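     /* Undo tg3_ints_init(): release the MSI/MSI-X vectors and clear
      * the related feature flags.
      */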
9639 static void tg3_ints_fini(struct tg3 *tp)
9640 {
9641         if (tg3_flag(tp, USING_MSIX))
9642                 pci_disable_msix(tp->pdev);
9643         else if (tg3_flag(tp, USING_MSI))
9644                 pci_disable_msi(tp->pdev);
9645         tg3_flag_clear(tp, USING_MSI);
9646         tg3_flag_clear(tp, USING_MSIX);
9647         tg3_flag_clear(tp, ENABLE_RSS);
9648         tg3_flag_clear(tp, ENABLE_TSS);
9649 }
9650
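     /* net_device open hook: load firmware if required, power up the
      * chip, allocate rings and IRQs, start the timer and enable
      * interrupts.
      */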
9651 static int tg3_open(struct net_device *dev)
9652 {
9653         struct tg3 *tp = netdev_priv(dev);
9654         int i, err;
9655
9656         if (tp->fw_needed) {
9657                 err = tg3_request_firmware(tp);
9658                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9659                         if (err)
9660                                 return err;
9661                 } else if (err) {
9662                         netdev_warn(tp->dev, "TSO capability disabled\n");
9663                         tg3_flag_clear(tp, TSO_CAPABLE);
9664                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9665                         netdev_notice(tp->dev, "TSO capability restored\n");
9666                         tg3_flag_set(tp, TSO_CAPABLE);
9667                 }
9668         }
9669
9670         netif_carrier_off(tp->dev);
9671
9672         err = tg3_power_up(tp);
9673         if (err)
9674                 return err;
9675
9676         tg3_full_lock(tp, 0);
9677
9678         tg3_disable_ints(tp);
9679         tg3_flag_clear(tp, INIT_COMPLETE);
9680
9681         tg3_full_unlock(tp);
9682
9683         /*
9684          * Setup interrupts first so we know how
9685          * many NAPI resources to allocate
9686          */
9687         tg3_ints_init(tp);
9688
9689         tg3_rss_check_indir_tbl(tp);
9690
9691         /* The placement of this call is tied
9692          * to the setup and use of Host TX descriptors.
9693          */
9694         err = tg3_alloc_consistent(tp);
9695         if (err)
9696                 goto err_out1;
9697
9698         tg3_napi_init(tp);
9699
9700         tg3_napi_enable(tp);
9701
9702         for (i = 0; i < tp->irq_cnt; i++) {
9703                 struct tg3_napi *tnapi = &tp->napi[i];
9704                 err = tg3_request_irq(tp, i);
9705                 if (err) {
9706                         for (i--; i >= 0; i--) {
9707                                 tnapi = &tp->napi[i];
9708                                 free_irq(tnapi->irq_vec, tnapi);
9709                         }
9710                         goto err_out2;
9711                 }
9712         }
9713
9714         tg3_full_lock(tp, 0);
9715
9716         err = tg3_init_hw(tp, 1);
9717         if (err) {
9718                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9719                 tg3_free_rings(tp);
9720         } else {
9721                 if (tg3_flag(tp, TAGGED_STATUS) &&
9722                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9723                     !tg3_flag(tp, 57765_CLASS))
9724                         tp->timer_offset = HZ;
9725                 else
9726                         tp->timer_offset = HZ / 10;
9727
9728                 BUG_ON(tp->timer_offset > HZ);
9729                 tp->timer_counter = tp->timer_multiplier =
9730                         (HZ / tp->timer_offset);
9731                 tp->asf_counter = tp->asf_multiplier =
9732                         ((HZ / tp->timer_offset) * 2);
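                /* e.g. with HZ=1000 and tagged status, timer_offset is
                 * 1000 jiffies, so timer_counter is 1 and the periodic
                 * work still runs once per second; without tagged
                 * status the timer fires every HZ/10 jiffies and
                 * counts 10 ticks per second.  asf_counter paces the
                 * ASF path at twice that interval (every 2 seconds).
                 */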
9733
9734                 init_timer(&tp->timer);
9735                 tp->timer.expires = jiffies + tp->timer_offset;
9736                 tp->timer.data = (unsigned long) tp;
9737                 tp->timer.function = tg3_timer;
9738         }
9739
9740         tg3_full_unlock(tp);
9741
9742         if (err)
9743                 goto err_out3;
9744
9745         if (tg3_flag(tp, USING_MSI)) {
9746                 err = tg3_test_msi(tp);
9747
9748                 if (err) {
9749                         tg3_full_lock(tp, 0);
9750                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9751                         tg3_free_rings(tp);
9752                         tg3_full_unlock(tp);
9753
9754                         goto err_out2;
9755                 }
9756
9757                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9758                         u32 val = tr32(PCIE_TRANSACTION_CFG);
9759
9760                         tw32(PCIE_TRANSACTION_CFG,
9761                              val | PCIE_TRANS_CFG_1SHOT_MSI);
9762                 }
9763         }
9764
9765         tg3_phy_start(tp);
9766
9767         tg3_full_lock(tp, 0);
9768
9769         add_timer(&tp->timer);
9770         tg3_flag_set(tp, INIT_COMPLETE);
9771         tg3_enable_ints(tp);
9772
9773         tg3_full_unlock(tp);
9774
9775         netif_tx_start_all_queues(dev);
9776
9777         /*
9778          * If the loopback feature was turned on while the device was down,
9779          * reset it now to make sure that it is installed properly.
9780          */
9781         if (dev->features & NETIF_F_LOOPBACK)
9782                 tg3_set_loopback(dev, dev->features);
9783
9784         return 0;
9785
9786 err_out3:
9787         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9788                 struct tg3_napi *tnapi = &tp->napi[i];
9789                 free_irq(tnapi->irq_vec, tnapi);
9790         }
9791
9792 err_out2:
9793         tg3_napi_disable(tp);
9794         tg3_napi_fini(tp);
9795         tg3_free_consistent(tp);
9796
9797 err_out1:
9798         tg3_ints_fini(tp);
9799         tg3_frob_aux_power(tp, false);
9800         pci_set_power_state(tp->pdev, PCI_D3hot);
9801         return err;
9802 }
9803
9804 static int tg3_close(struct net_device *dev)
9805 {
9806         int i;
9807         struct tg3 *tp = netdev_priv(dev);
9808
9809         tg3_napi_disable(tp);
9810         tg3_reset_task_cancel(tp);
9811
9812         netif_tx_stop_all_queues(dev);
9813
9814         del_timer_sync(&tp->timer);
9815
9816         tg3_phy_stop(tp);
9817
9818         tg3_full_lock(tp, 1);
9819
9820         tg3_disable_ints(tp);
9821
9822         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9823         tg3_free_rings(tp);
9824         tg3_flag_clear(tp, INIT_COMPLETE);
9825
9826         tg3_full_unlock(tp);
9827
9828         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9829                 struct tg3_napi *tnapi = &tp->napi[i];
9830                 free_irq(tnapi->irq_vec, tnapi);
9831         }
9832
9833         tg3_ints_fini(tp);
9834
9835         /* Clear stats across close / open calls */
9836         memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
9837         memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
9838
9839         tg3_napi_fini(tp);
9840
9841         tg3_free_consistent(tp);
9842
9843         tg3_power_down(tp);
9844
9845         netif_carrier_off(tp->dev);
9846
9847         return 0;
9848 }
9849
9850 static inline u64 get_stat64(tg3_stat64_t *val)
9851 {
9852         return ((u64)val->high << 32) | ((u64)val->low);
9853 }
9854
9855 static u64 calc_crc_errors(struct tg3 *tp)
9856 {
9857         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9858
9859         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9860             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9861              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9862                 u32 val;
9863
9864                 spin_lock_bh(&tp->lock);
9865                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9866                         tg3_writephy(tp, MII_TG3_TEST1,
9867                                      val | MII_TG3_TEST1_CRC_EN);
9868                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9869                 } else
9870                         val = 0;
9871                 spin_unlock_bh(&tp->lock);
9872
9873                 tp->phy_crc_errors += val;
9874
9875                 return tp->phy_crc_errors;
9876         }
9877
9878         return get_stat64(&hw_stats->rx_fcs_errors);
9879 }
9880
9881 #define ESTAT_ADD(member) \
9882         estats->member =        old_estats->member + \
9883                                 get_stat64(&hw_stats->member)
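/* ESTAT_ADD() folds the live hardware counter into the totals saved
 * before the last chip reset (estats_prev), keeping the ethtool
 * counters monotonic across resets while the interface stays up; both
 * snapshots are cleared again in tg3_close().
 */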
9884
9885 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp,
9886                                                struct tg3_ethtool_stats *estats)
9887 {
9888         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9889         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9890
9891         if (!hw_stats)
9892                 return old_estats;
9893
9894         ESTAT_ADD(rx_octets);
9895         ESTAT_ADD(rx_fragments);
9896         ESTAT_ADD(rx_ucast_packets);
9897         ESTAT_ADD(rx_mcast_packets);
9898         ESTAT_ADD(rx_bcast_packets);
9899         ESTAT_ADD(rx_fcs_errors);
9900         ESTAT_ADD(rx_align_errors);
9901         ESTAT_ADD(rx_xon_pause_rcvd);
9902         ESTAT_ADD(rx_xoff_pause_rcvd);
9903         ESTAT_ADD(rx_mac_ctrl_rcvd);
9904         ESTAT_ADD(rx_xoff_entered);
9905         ESTAT_ADD(rx_frame_too_long_errors);
9906         ESTAT_ADD(rx_jabbers);
9907         ESTAT_ADD(rx_undersize_packets);
9908         ESTAT_ADD(rx_in_length_errors);
9909         ESTAT_ADD(rx_out_length_errors);
9910         ESTAT_ADD(rx_64_or_less_octet_packets);
9911         ESTAT_ADD(rx_65_to_127_octet_packets);
9912         ESTAT_ADD(rx_128_to_255_octet_packets);
9913         ESTAT_ADD(rx_256_to_511_octet_packets);
9914         ESTAT_ADD(rx_512_to_1023_octet_packets);
9915         ESTAT_ADD(rx_1024_to_1522_octet_packets);
9916         ESTAT_ADD(rx_1523_to_2047_octet_packets);
9917         ESTAT_ADD(rx_2048_to_4095_octet_packets);
9918         ESTAT_ADD(rx_4096_to_8191_octet_packets);
9919         ESTAT_ADD(rx_8192_to_9022_octet_packets);
9920
9921         ESTAT_ADD(tx_octets);
9922         ESTAT_ADD(tx_collisions);
9923         ESTAT_ADD(tx_xon_sent);
9924         ESTAT_ADD(tx_xoff_sent);
9925         ESTAT_ADD(tx_flow_control);
9926         ESTAT_ADD(tx_mac_errors);
9927         ESTAT_ADD(tx_single_collisions);
9928         ESTAT_ADD(tx_mult_collisions);
9929         ESTAT_ADD(tx_deferred);
9930         ESTAT_ADD(tx_excessive_collisions);
9931         ESTAT_ADD(tx_late_collisions);
9932         ESTAT_ADD(tx_collide_2times);
9933         ESTAT_ADD(tx_collide_3times);
9934         ESTAT_ADD(tx_collide_4times);
9935         ESTAT_ADD(tx_collide_5times);
9936         ESTAT_ADD(tx_collide_6times);
9937         ESTAT_ADD(tx_collide_7times);
9938         ESTAT_ADD(tx_collide_8times);
9939         ESTAT_ADD(tx_collide_9times);
9940         ESTAT_ADD(tx_collide_10times);
9941         ESTAT_ADD(tx_collide_11times);
9942         ESTAT_ADD(tx_collide_12times);
9943         ESTAT_ADD(tx_collide_13times);
9944         ESTAT_ADD(tx_collide_14times);
9945         ESTAT_ADD(tx_collide_15times);
9946         ESTAT_ADD(tx_ucast_packets);
9947         ESTAT_ADD(tx_mcast_packets);
9948         ESTAT_ADD(tx_bcast_packets);
9949         ESTAT_ADD(tx_carrier_sense_errors);
9950         ESTAT_ADD(tx_discards);
9951         ESTAT_ADD(tx_errors);
9952
9953         ESTAT_ADD(dma_writeq_full);
9954         ESTAT_ADD(dma_write_prioq_full);
9955         ESTAT_ADD(rxbds_empty);
9956         ESTAT_ADD(rx_discards);
9957         ESTAT_ADD(rx_errors);
9958         ESTAT_ADD(rx_threshold_hit);
9959
9960         ESTAT_ADD(dma_readq_full);
9961         ESTAT_ADD(dma_read_prioq_full);
9962         ESTAT_ADD(tx_comp_queue_full);
9963
9964         ESTAT_ADD(ring_set_send_prod_index);
9965         ESTAT_ADD(ring_status_update);
9966         ESTAT_ADD(nic_irqs);
9967         ESTAT_ADD(nic_avoided_irqs);
9968         ESTAT_ADD(nic_tx_threshold_hit);
9969
9970         ESTAT_ADD(mbuf_lwm_thresh_hit);
9971
9972         return estats;
9973 }
9974
9975 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9976                                                  struct rtnl_link_stats64 *stats)
9977 {
9978         struct tg3 *tp = netdev_priv(dev);
9979         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9980         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9981
9982         if (!hw_stats)
9983                 return old_stats;
9984
9985         stats->rx_packets = old_stats->rx_packets +
9986                 get_stat64(&hw_stats->rx_ucast_packets) +
9987                 get_stat64(&hw_stats->rx_mcast_packets) +
9988                 get_stat64(&hw_stats->rx_bcast_packets);
9989
9990         stats->tx_packets = old_stats->tx_packets +
9991                 get_stat64(&hw_stats->tx_ucast_packets) +
9992                 get_stat64(&hw_stats->tx_mcast_packets) +
9993                 get_stat64(&hw_stats->tx_bcast_packets);
9994
9995         stats->rx_bytes = old_stats->rx_bytes +
9996                 get_stat64(&hw_stats->rx_octets);
9997         stats->tx_bytes = old_stats->tx_bytes +
9998                 get_stat64(&hw_stats->tx_octets);
9999
10000         stats->rx_errors = old_stats->rx_errors +
10001                 get_stat64(&hw_stats->rx_errors);
10002         stats->tx_errors = old_stats->tx_errors +
10003                 get_stat64(&hw_stats->tx_errors) +
10004                 get_stat64(&hw_stats->tx_mac_errors) +
10005                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
10006                 get_stat64(&hw_stats->tx_discards);
10007
10008         stats->multicast = old_stats->multicast +
10009                 get_stat64(&hw_stats->rx_mcast_packets);
10010         stats->collisions = old_stats->collisions +
10011                 get_stat64(&hw_stats->tx_collisions);
10012
10013         stats->rx_length_errors = old_stats->rx_length_errors +
10014                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
10015                 get_stat64(&hw_stats->rx_undersize_packets);
10016
10017         stats->rx_over_errors = old_stats->rx_over_errors +
10018                 get_stat64(&hw_stats->rxbds_empty);
10019         stats->rx_frame_errors = old_stats->rx_frame_errors +
10020                 get_stat64(&hw_stats->rx_align_errors);
10021         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10022                 get_stat64(&hw_stats->tx_discards);
10023         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10024                 get_stat64(&hw_stats->tx_carrier_sense_errors);
10025
10026         stats->rx_crc_errors = old_stats->rx_crc_errors +
10027                 calc_crc_errors(tp);
10028
10029         stats->rx_missed_errors = old_stats->rx_missed_errors +
10030                 get_stat64(&hw_stats->rx_discards);
10031
10032         stats->rx_dropped = tp->rx_dropped;
10033         stats->tx_dropped = tp->tx_dropped;
10034
10035         return stats;
10036 }
10037
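/* calc_crc() is the standard bit-reflected Ethernet CRC-32:
 * polynomial 0xedb88320, initial value 0xffffffff, final inversion.
 * An illustrative library equivalent follows the function.
 */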
10038 static inline u32 calc_crc(unsigned char *buf, int len)
10039 {
10040         u32 reg;
10041         u32 tmp;
10042         int j, k;
10043
10044         reg = 0xffffffff;
10045
10046         for (j = 0; j < len; j++) {
10047                 reg ^= buf[j];
10048
10049                 for (k = 0; k < 8; k++) {
10050                         tmp = reg & 0x01;
10051
10052                         reg >>= 1;
10053
10054                         if (tmp)
10055                                 reg ^= 0xedb88320;
10056                 }
10057         }
10058
10059         return ~reg;
10060 }
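/* A minimal illustrative equivalent, assuming crc32_le() from
 * <linux/crc32.h> (same reflected polynomial) is available;
 * calc_crc_lib() is a hypothetical name, not part of this driver:
 *
 *	static inline u32 calc_crc_lib(unsigned char *buf, int len)
 *	{
 *		return crc32_le(~0, buf, len) ^ ~0;
 *	}
 */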
10061
10062 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
10063 {
10064         /* accept or reject all multicast frames */
10065         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
10066         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
10067         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
10068         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
10069 }
10070
10071 static void __tg3_set_rx_mode(struct net_device *dev)
10072 {
10073         struct tg3 *tp = netdev_priv(dev);
10074         u32 rx_mode;
10075
10076         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
10077                                   RX_MODE_KEEP_VLAN_TAG);
10078
10079 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
10080         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
10081          * flag clear.
10082          */
10083         if (!tg3_flag(tp, ENABLE_ASF))
10084                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
10085 #endif
10086
10087         if (dev->flags & IFF_PROMISC) {
10088                 /* Promiscuous mode. */
10089                 rx_mode |= RX_MODE_PROMISC;
10090         } else if (dev->flags & IFF_ALLMULTI) {
10091                 /* Accept all multicast. */
10092                 tg3_set_multi(tp, 1);
10093         } else if (netdev_mc_empty(dev)) {
10094                 /* Reject all multicast. */
10095                 tg3_set_multi(tp, 0);
10096         } else {
10097                 /* Accept one or more multicast addresses. */
10098                 struct netdev_hw_addr *ha;
10099                 u32 mc_filter[4] = { 0, };
10100                 u32 regidx;
10101                 u32 bit;
10102                 u32 crc;
10103
10104                 netdev_for_each_mc_addr(ha, dev) {
10105                         crc = calc_crc(ha->addr, ETH_ALEN);
10106                         bit = ~crc & 0x7f;
10107                         regidx = (bit & 0x60) >> 5;
10108                         bit &= 0x1f;
10109                         mc_filter[regidx] |= (1 << bit);
10110                 }
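                /* The 7-bit hash index (~crc & 0x7f) is split so that
                 * bits 6:5 select one of the four MAC_HASH_REG_n
                 * registers and bits 4:0 select the bit within it,
                 * giving a 128-bit multicast filter.
                 */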
10111
10112                 tw32(MAC_HASH_REG_0, mc_filter[0]);
10113                 tw32(MAC_HASH_REG_1, mc_filter[1]);
10114                 tw32(MAC_HASH_REG_2, mc_filter[2]);
10115                 tw32(MAC_HASH_REG_3, mc_filter[3]);
10116         }
10117
10118         if (rx_mode != tp->rx_mode) {
10119                 tp->rx_mode = rx_mode;
10120                 tw32_f(MAC_RX_MODE, rx_mode);
10121                 udelay(10);
10122         }
10123 }
10124
10125 static void tg3_set_rx_mode(struct net_device *dev)
10126 {
10127         struct tg3 *tp = netdev_priv(dev);
10128
10129         if (!netif_running(dev))
10130                 return;
10131
10132         tg3_full_lock(tp, 0);
10133         __tg3_set_rx_mode(dev);
10134         tg3_full_unlock(tp);
10135 }
10136
10137 static int tg3_get_regs_len(struct net_device *dev)
10138 {
10139         return TG3_REG_BLK_SIZE;
10140 }
10141
10142 static void tg3_get_regs(struct net_device *dev,
10143                 struct ethtool_regs *regs, void *_p)
10144 {
10145         struct tg3 *tp = netdev_priv(dev);
10146
10147         regs->version = 0;
10148
10149         memset(_p, 0, TG3_REG_BLK_SIZE);
10150
10151         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10152                 return;
10153
10154         tg3_full_lock(tp, 0);
10155
10156         tg3_dump_legacy_regs(tp, (u32 *)_p);
10157
10158         tg3_full_unlock(tp);
10159 }
10160
10161 static int tg3_get_eeprom_len(struct net_device *dev)
10162 {
10163         struct tg3 *tp = netdev_priv(dev);
10164
10165         return tp->nvram_size;
10166 }
10167
10168 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10169 {
10170         struct tg3 *tp = netdev_priv(dev);
10171         int ret;
10172         u8  *pd;
10173         u32 i, offset, len, b_offset, b_count;
10174         __be32 val;
10175
10176         if (tg3_flag(tp, NO_NVRAM))
10177                 return -EINVAL;
10178
10179         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10180                 return -EAGAIN;
10181
10182         offset = eeprom->offset;
10183         len = eeprom->len;
10184         eeprom->len = 0;
10185
10186         eeprom->magic = TG3_EEPROM_MAGIC;
10187
10188         if (offset & 3) {
10189                 /* adjustments to start on required 4 byte boundary */
10190                 b_offset = offset & 3;
10191                 b_count = 4 - b_offset;
10192                 if (b_count > len) {
10193                         /* i.e. offset=1 len=2 */
10194                         b_count = len;
10195                 }
10196                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10197                 if (ret)
10198                         return ret;
10199                 memcpy(data, ((char *)&val) + b_offset, b_count);
10200                 len -= b_count;
10201                 offset += b_count;
10202                 eeprom->len += b_count;
10203         }
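        /* Worked example: offset=2, len=9 reads bytes 2-3 from the
         * word at 0 above, bytes 4-7 in the aligned loop below, and
         * bytes 8-10 from the word at 8 in the tail fixup.
         */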
10204
10205         /* read bytes up to the last 4 byte boundary */
10206         pd = &data[eeprom->len];
10207         for (i = 0; i < (len - (len & 3)); i += 4) {
10208                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10209                 if (ret) {
10210                         eeprom->len += i;
10211                         return ret;
10212                 }
10213                 memcpy(pd + i, &val, 4);
10214         }
10215         eeprom->len += i;
10216
10217         if (len & 3) {
10218                 /* read last bytes not ending on 4 byte boundary */
10219                 pd = &data[eeprom->len];
10220                 b_count = len & 3;
10221                 b_offset = offset + len - b_count;
10222                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10223                 if (ret)
10224                         return ret;
10225                 memcpy(pd, &val, b_count);
10226                 eeprom->len += b_count;
10227         }
10228         return 0;
10229 }
10230
10231 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
10232
10233 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10234 {
10235         struct tg3 *tp = netdev_priv(dev);
10236         int ret;
10237         u32 offset, len, b_offset, odd_len;
10238         u8 *buf;
10239         __be32 start, end;
10240
10241         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10242                 return -EAGAIN;
10243
10244         if (tg3_flag(tp, NO_NVRAM) ||
10245             eeprom->magic != TG3_EEPROM_MAGIC)
10246                 return -EINVAL;
10247
10248         offset = eeprom->offset;
10249         len = eeprom->len;
10250
10251         if ((b_offset = (offset & 3))) {
10252                 /* adjustments to start on required 4 byte boundary */
10253                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10254                 if (ret)
10255                         return ret;
10256                 len += b_offset;
10257                 offset &= ~3;
10258                 if (len < 4)
10259                         len = 4;
10260         }
10261
10262         odd_len = 0;
10263         if (len & 3) {
10264                 /* adjustments to end on required 4 byte boundary */
10265                 odd_len = 1;
10266                 len = (len + 3) & ~3;
10267                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10268                 if (ret)
10269                         return ret;
10270         }
10271
10272         buf = data;
10273         if (b_offset || odd_len) {
10274                 buf = kmalloc(len, GFP_KERNEL);
10275                 if (!buf)
10276                         return -ENOMEM;
10277                 if (b_offset)
10278                         memcpy(buf, &start, 4);
10279                 if (odd_len)
10280                         memcpy(buf+len-4, &end, 4);
10281                 memcpy(buf + b_offset, data, eeprom->len);
10282         }
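        /* e.g. offset=5, len=6 becomes an aligned 8-byte write at
         * offset 4: byte 4 is preserved from 'start', bytes 5-10 come
         * from the caller, and byte 11 is preserved from 'end'.
         */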
10283
10284         ret = tg3_nvram_write_block(tp, offset, len, buf);
10285
10286         if (buf != data)
10287                 kfree(buf);
10288
10289         return ret;
10290 }
10291
10292 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10293 {
10294         struct tg3 *tp = netdev_priv(dev);
10295
10296         if (tg3_flag(tp, USE_PHYLIB)) {
10297                 struct phy_device *phydev;
10298                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10299                         return -EAGAIN;
10300                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10301                 return phy_ethtool_gset(phydev, cmd);
10302         }
10303
10304         cmd->supported = (SUPPORTED_Autoneg);
10305
10306         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10307                 cmd->supported |= (SUPPORTED_1000baseT_Half |
10308                                    SUPPORTED_1000baseT_Full);
10309
10310         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10311                 cmd->supported |= (SUPPORTED_100baseT_Half |
10312                                   SUPPORTED_100baseT_Full |
10313                                   SUPPORTED_10baseT_Half |
10314                                   SUPPORTED_10baseT_Full |
10315                                   SUPPORTED_TP);
10316                 cmd->port = PORT_TP;
10317         } else {
10318                 cmd->supported |= SUPPORTED_FIBRE;
10319                 cmd->port = PORT_FIBRE;
10320         }
10321
10322         cmd->advertising = tp->link_config.advertising;
10323         if (tg3_flag(tp, PAUSE_AUTONEG)) {
10324                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10325                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10326                                 cmd->advertising |= ADVERTISED_Pause;
10327                         } else {
10328                                 cmd->advertising |= ADVERTISED_Pause |
10329                                                     ADVERTISED_Asym_Pause;
10330                         }
10331                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10332                         cmd->advertising |= ADVERTISED_Asym_Pause;
10333                 }
10334         }
10335         if (netif_running(dev) && netif_carrier_ok(dev)) {
10336                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10337                 cmd->duplex = tp->link_config.active_duplex;
10338                 cmd->lp_advertising = tp->link_config.rmt_adv;
10339                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10340                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
10341                                 cmd->eth_tp_mdix = ETH_TP_MDI_X;
10342                         else
10343                                 cmd->eth_tp_mdix = ETH_TP_MDI;
10344                 }
10345         } else {
10346                 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
10347                 cmd->duplex = DUPLEX_INVALID;
10348                 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
10349         }
10350         cmd->phy_address = tp->phy_addr;
10351         cmd->transceiver = XCVR_INTERNAL;
10352         cmd->autoneg = tp->link_config.autoneg;
10353         cmd->maxtxpkt = 0;
10354         cmd->maxrxpkt = 0;
10355         return 0;
10356 }
10357
10358 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10359 {
10360         struct tg3 *tp = netdev_priv(dev);
10361         u32 speed = ethtool_cmd_speed(cmd);
10362
10363         if (tg3_flag(tp, USE_PHYLIB)) {
10364                 struct phy_device *phydev;
10365                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10366                         return -EAGAIN;
10367                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10368                 return phy_ethtool_sset(phydev, cmd);
10369         }
10370
10371         if (cmd->autoneg != AUTONEG_ENABLE &&
10372             cmd->autoneg != AUTONEG_DISABLE)
10373                 return -EINVAL;
10374
10375         if (cmd->autoneg == AUTONEG_DISABLE &&
10376             cmd->duplex != DUPLEX_FULL &&
10377             cmd->duplex != DUPLEX_HALF)
10378                 return -EINVAL;
10379
10380         if (cmd->autoneg == AUTONEG_ENABLE) {
10381                 u32 mask = ADVERTISED_Autoneg |
10382                            ADVERTISED_Pause |
10383                            ADVERTISED_Asym_Pause;
10384
10385                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10386                         mask |= ADVERTISED_1000baseT_Half |
10387                                 ADVERTISED_1000baseT_Full;
10388
10389                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10390                         mask |= ADVERTISED_100baseT_Half |
10391                                 ADVERTISED_100baseT_Full |
10392                                 ADVERTISED_10baseT_Half |
10393                                 ADVERTISED_10baseT_Full |
10394                                 ADVERTISED_TP;
10395                 else
10396                         mask |= ADVERTISED_FIBRE;
10397
10398                 if (cmd->advertising & ~mask)
10399                         return -EINVAL;
10400
10401                 mask &= (ADVERTISED_1000baseT_Half |
10402                          ADVERTISED_1000baseT_Full |
10403                          ADVERTISED_100baseT_Half |
10404                          ADVERTISED_100baseT_Full |
10405                          ADVERTISED_10baseT_Half |
10406                          ADVERTISED_10baseT_Full);
10407
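                /* Keep only the speed/duplex bits; ADVERTISED_Autoneg
                 * is added back below, and pause advertisement is
                 * regenerated from link_config.flowctrl.
                 */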
10408                 cmd->advertising &= mask;
10409         } else {
10410                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10411                         if (speed != SPEED_1000)
10412                                 return -EINVAL;
10413
10414                         if (cmd->duplex != DUPLEX_FULL)
10415                                 return -EINVAL;
10416                 } else {
10417                         if (speed != SPEED_100 &&
10418                             speed != SPEED_10)
10419                                 return -EINVAL;
10420                 }
10421         }
10422
10423         tg3_full_lock(tp, 0);
10424
10425         tp->link_config.autoneg = cmd->autoneg;
10426         if (cmd->autoneg == AUTONEG_ENABLE) {
10427                 tp->link_config.advertising = (cmd->advertising |
10428                                               ADVERTISED_Autoneg);
10429                 tp->link_config.speed = SPEED_INVALID;
10430                 tp->link_config.duplex = DUPLEX_INVALID;
10431         } else {
10432                 tp->link_config.advertising = 0;
10433                 tp->link_config.speed = speed;
10434                 tp->link_config.duplex = cmd->duplex;
10435         }
10436
10437         tp->link_config.orig_speed = tp->link_config.speed;
10438         tp->link_config.orig_duplex = tp->link_config.duplex;
10439         tp->link_config.orig_autoneg = tp->link_config.autoneg;
10440
10441         if (netif_running(dev))
10442                 tg3_setup_phy(tp, 1);
10443
10444         tg3_full_unlock(tp);
10445
10446         return 0;
10447 }
10448
10449 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10450 {
10451         struct tg3 *tp = netdev_priv(dev);
10452
10453         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
10454         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
10455         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
10456         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
10457 }
10458
10459 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10460 {
10461         struct tg3 *tp = netdev_priv(dev);
10462
10463         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10464                 wol->supported = WAKE_MAGIC;
10465         else
10466                 wol->supported = 0;
10467         wol->wolopts = 0;
10468         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10469                 wol->wolopts = WAKE_MAGIC;
10470         memset(&wol->sopass, 0, sizeof(wol->sopass));
10471 }
10472
10473 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10474 {
10475         struct tg3 *tp = netdev_priv(dev);
10476         struct device *dp = &tp->pdev->dev;
10477
10478         if (wol->wolopts & ~WAKE_MAGIC)
10479                 return -EINVAL;
10480         if ((wol->wolopts & WAKE_MAGIC) &&
10481             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10482                 return -EINVAL;
10483
10484         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10485
10486         spin_lock_bh(&tp->lock);
10487         if (device_may_wakeup(dp))
10488                 tg3_flag_set(tp, WOL_ENABLE);
10489         else
10490                 tg3_flag_clear(tp, WOL_ENABLE);
10491         spin_unlock_bh(&tp->lock);
10492
10493         return 0;
10494 }
10495
10496 static u32 tg3_get_msglevel(struct net_device *dev)
10497 {
10498         struct tg3 *tp = netdev_priv(dev);
10499         return tp->msg_enable;
10500 }
10501
10502 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10503 {
10504         struct tg3 *tp = netdev_priv(dev);
10505         tp->msg_enable = value;
10506 }
10507
10508 static int tg3_nway_reset(struct net_device *dev)
10509 {
10510         struct tg3 *tp = netdev_priv(dev);
10511         int r;
10512
10513         if (!netif_running(dev))
10514                 return -EAGAIN;
10515
10516         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10517                 return -EINVAL;
10518
10519         if (tg3_flag(tp, USE_PHYLIB)) {
10520                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10521                         return -EAGAIN;
10522                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10523         } else {
10524                 u32 bmcr;
10525
10526                 spin_lock_bh(&tp->lock);
10527                 r = -EINVAL;
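                /* The first BMCR read acts as a dummy read; only the
                 * result of the second read is checked.
                 */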
10528                 tg3_readphy(tp, MII_BMCR, &bmcr);
10529                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10530                     ((bmcr & BMCR_ANENABLE) ||
10531                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10532                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10533                                                    BMCR_ANENABLE);
10534                         r = 0;
10535                 }
10536                 spin_unlock_bh(&tp->lock);
10537         }
10538
10539         return r;
10540 }
10541
10542 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10543 {
10544         struct tg3 *tp = netdev_priv(dev);
10545
10546         ering->rx_max_pending = tp->rx_std_ring_mask;
10547         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10548                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10549         else
10550                 ering->rx_jumbo_max_pending = 0;
10551
10552         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10553
10554         ering->rx_pending = tp->rx_pending;
10555         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10556                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10557         else
10558                 ering->rx_jumbo_pending = 0;
10559
10560         ering->tx_pending = tp->napi[0].tx_pending;
10561 }
10562
10563 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10564 {
10565         struct tg3 *tp = netdev_priv(dev);
10566         int i, irq_sync = 0, err = 0;
10567
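        /* The TX ring must hold at least one maximally fragmented skb
         * (more than MAX_SKB_FRAGS descriptors); chips with the TSO
         * bug segment large packets in the driver, so they need
         * roughly three times that headroom.
         */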
10568         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10569             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10570             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10571             (ering->tx_pending <= MAX_SKB_FRAGS) ||
10572             (tg3_flag(tp, TSO_BUG) &&
10573              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10574                 return -EINVAL;
10575
10576         if (netif_running(dev)) {
10577                 tg3_phy_stop(tp);
10578                 tg3_netif_stop(tp);
10579                 irq_sync = 1;
10580         }
10581
10582         tg3_full_lock(tp, irq_sync);
10583
10584         tp->rx_pending = ering->rx_pending;
10585
10586         if (tg3_flag(tp, MAX_RXPEND_64) &&
10587             tp->rx_pending > 63)
10588                 tp->rx_pending = 63;
10589         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10590
10591         for (i = 0; i < tp->irq_max; i++)
10592                 tp->napi[i].tx_pending = ering->tx_pending;
10593
10594         if (netif_running(dev)) {
10595                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10596                 err = tg3_restart_hw(tp, 1);
10597                 if (!err)
10598                         tg3_netif_start(tp);
10599         }
10600
10601         tg3_full_unlock(tp);
10602
10603         if (irq_sync && !err)
10604                 tg3_phy_start(tp);
10605
10606         return err;
10607 }
10608
10609 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10610 {
10611         struct tg3 *tp = netdev_priv(dev);
10612
10613         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10614
10615         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
10616                 epause->rx_pause = 1;
10617         else
10618                 epause->rx_pause = 0;
10619
10620         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
10621                 epause->tx_pause = 1;
10622         else
10623                 epause->tx_pause = 0;
10624 }
10625
10626 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10627 {
10628         struct tg3 *tp = netdev_priv(dev);
10629         int err = 0;
10630
10631         if (tg3_flag(tp, USE_PHYLIB)) {
10632                 u32 newadv;
10633                 struct phy_device *phydev;
10634
10635                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10636
10637                 if (!(phydev->supported & SUPPORTED_Pause) ||
10638                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10639                      (epause->rx_pause != epause->tx_pause)))
10640                         return -EINVAL;
10641
10642                 tp->link_config.flowctrl = 0;
10643                 if (epause->rx_pause) {
10644                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10645
10646                         if (epause->tx_pause) {
10647                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10648                                 newadv = ADVERTISED_Pause;
10649                         } else
10650                                 newadv = ADVERTISED_Pause |
10651                                          ADVERTISED_Asym_Pause;
10652                 } else if (epause->tx_pause) {
10653                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10654                         newadv = ADVERTISED_Asym_Pause;
10655                 } else
10656                         newadv = 0;
10657
10658                 if (epause->autoneg)
10659                         tg3_flag_set(tp, PAUSE_AUTONEG);
10660                 else
10661                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10662
10663                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10664                         u32 oldadv = phydev->advertising &
10665                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10666                         if (oldadv != newadv) {
10667                                 phydev->advertising &=
10668                                         ~(ADVERTISED_Pause |
10669                                           ADVERTISED_Asym_Pause);
10670                                 phydev->advertising |= newadv;
10671                                 if (phydev->autoneg) {
10672                                         /*
10673                                          * Always renegotiate the link to
10674                                          * inform our link partner of our
10675                                          * flow control settings, even if the
10676                                          * flow control is forced.  Let
10677                                          * tg3_adjust_link() do the final
10678                                          * flow control setup.
10679                                          */
10680                                         return phy_start_aneg(phydev);
10681                                 }
10682                         }
10683
10684                         if (!epause->autoneg)
10685                                 tg3_setup_flow_control(tp, 0, 0);
10686                 } else {
10687                         tp->link_config.orig_advertising &=
10688                                         ~(ADVERTISED_Pause |
10689                                           ADVERTISED_Asym_Pause);
10690                         tp->link_config.orig_advertising |= newadv;
10691                 }
10692         } else {
10693                 int irq_sync = 0;
10694
10695                 if (netif_running(dev)) {
10696                         tg3_netif_stop(tp);
10697                         irq_sync = 1;
10698                 }
10699
10700                 tg3_full_lock(tp, irq_sync);
10701
10702                 if (epause->autoneg)
10703                         tg3_flag_set(tp, PAUSE_AUTONEG);
10704                 else
10705                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10706                 if (epause->rx_pause)
10707                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10708                 else
10709                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10710                 if (epause->tx_pause)
10711                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10712                 else
10713                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10714
10715                 if (netif_running(dev)) {
10716                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10717                         err = tg3_restart_hw(tp, 1);
10718                         if (!err)
10719                                 tg3_netif_start(tp);
10720                 }
10721
10722                 tg3_full_unlock(tp);
10723         }
10724
10725         return err;
10726 }
10727
10728 static int tg3_get_sset_count(struct net_device *dev, int sset)
10729 {
10730         switch (sset) {
10731         case ETH_SS_TEST:
10732                 return TG3_NUM_TEST;
10733         case ETH_SS_STATS:
10734                 return TG3_NUM_STATS;
10735         default:
10736                 return -EOPNOTSUPP;
10737         }
10738 }
10739
10740 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
10741                          u32 *rules __always_unused)
10742 {
10743         struct tg3 *tp = netdev_priv(dev);
10744
10745         if (!tg3_flag(tp, SUPPORT_MSIX))
10746                 return -EOPNOTSUPP;
10747
10748         switch (info->cmd) {
10749         case ETHTOOL_GRXRINGS:
10750                 if (netif_running(tp->dev))
10751                         info->data = tp->irq_cnt;
10752                 else {
10753                         info->data = num_online_cpus();
10754                         if (info->data > TG3_IRQ_MAX_VECS_RSS)
10755                                 info->data = TG3_IRQ_MAX_VECS_RSS;
10756                 }
10757
10758                 /* The first interrupt vector only
10759                  * handles link interrupts.
10760                  */
10761                 info->data -= 1;
10762                 return 0;
10763
10764         default:
10765                 return -EOPNOTSUPP;
10766         }
10767 }
10768
10769 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
10770 {
10771         u32 size = 0;
10772         struct tg3 *tp = netdev_priv(dev);
10773
10774         if (tg3_flag(tp, SUPPORT_MSIX))
10775                 size = TG3_RSS_INDIR_TBL_SIZE;
10776
10777         return size;
10778 }
10779
10780 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
10781 {
10782         struct tg3 *tp = netdev_priv(dev);
10783         int i;
10784
10785         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10786                 indir[i] = tp->rss_ind_tbl[i];
10787
10788         return 0;
10789 }
10790
10791 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
10792 {
10793         struct tg3 *tp = netdev_priv(dev);
10794         size_t i;
10795
10796         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10797                 tp->rss_ind_tbl[i] = indir[i];
10798
10799         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
10800                 return 0;
10801
10802         /* It is legal to write the indirection
10803          * table while the device is running.
10804          */
10805         tg3_full_lock(tp, 0);
10806         tg3_rss_write_indir_tbl(tp);
10807         tg3_full_unlock(tp);
10808
10809         return 0;
10810 }
10811
10812 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10813 {
10814         switch (stringset) {
10815         case ETH_SS_STATS:
10816                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10817                 break;
10818         case ETH_SS_TEST:
10819                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10820                 break;
10821         default:
10822                 WARN_ON(1);     /* we need a WARN() */
10823                 break;
10824         }
10825 }
10826
10827 static int tg3_set_phys_id(struct net_device *dev,
10828                             enum ethtool_phys_id_state state)
10829 {
10830         struct tg3 *tp = netdev_priv(dev);
10831
10832         if (!netif_running(tp->dev))
10833                 return -EAGAIN;
10834
10835         switch (state) {
10836         case ETHTOOL_ID_ACTIVE:
10837                 return 1;       /* cycle on/off once per second */
10838
10839         case ETHTOOL_ID_ON:
10840                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10841                      LED_CTRL_1000MBPS_ON |
10842                      LED_CTRL_100MBPS_ON |
10843                      LED_CTRL_10MBPS_ON |
10844                      LED_CTRL_TRAFFIC_OVERRIDE |
10845                      LED_CTRL_TRAFFIC_BLINK |
10846                      LED_CTRL_TRAFFIC_LED);
10847                 break;
10848
10849         case ETHTOOL_ID_OFF:
10850                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10851                      LED_CTRL_TRAFFIC_OVERRIDE);
10852                 break;
10853
10854         case ETHTOOL_ID_INACTIVE:
10855                 tw32(MAC_LED_CTRL, tp->led_ctrl);
10856                 break;
10857         }
10858
10859         return 0;
10860 }
10861
10862 static void tg3_get_ethtool_stats(struct net_device *dev,
10863                                    struct ethtool_stats *estats, u64 *tmp_stats)
10864 {
10865         struct tg3 *tp = netdev_priv(dev);
10866
10867         tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
10868 }
10869
10870 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
10871 {
10872         int i;
10873         __be32 *buf;
10874         u32 offset = 0, len = 0;
10875         u32 magic, val;
10876
10877         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10878                 return NULL;
10879
10880         if (magic == TG3_EEPROM_MAGIC) {
10881                 for (offset = TG3_NVM_DIR_START;
10882                      offset < TG3_NVM_DIR_END;
10883                      offset += TG3_NVM_DIRENT_SIZE) {
10884                         if (tg3_nvram_read(tp, offset, &val))
10885                                 return NULL;
10886
10887                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10888                             TG3_NVM_DIRTYPE_EXTVPD)
10889                                 break;
10890                 }
10891
10892                 if (offset != TG3_NVM_DIR_END) {
10893                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10894                         if (tg3_nvram_read(tp, offset + 4, &offset))
10895                                 return NULL;
10896
10897                         offset = tg3_nvram_logical_addr(tp, offset);
10898                 }
10899         }
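        /* If an extended-VPD directory entry was found, offset and len
         * now describe it; otherwise fall back to the fixed legacy VPD
         * block below.
         */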
10900
10901         if (!offset || !len) {
10902                 offset = TG3_NVM_VPD_OFF;
10903                 len = TG3_NVM_VPD_LEN;
10904         }
10905
10906         buf = kmalloc(len, GFP_KERNEL);
10907         if (buf == NULL)
10908                 return NULL;
10909
10910         if (magic == TG3_EEPROM_MAGIC) {
10911                 for (i = 0; i < len; i += 4) {
10912                         /* The data is in little-endian format in NVRAM.
10913                          * Use the big-endian read routines to preserve
10914                          * the byte order as it exists in NVRAM.
10915                          */
10916                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10917                                 goto error;
10918                 }
10919         } else {
10920                 u8 *ptr;
10921                 ssize_t cnt;
10922                 unsigned int pos = 0;
10923
10924                 ptr = (u8 *)&buf[0];
10925                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10926                         cnt = pci_read_vpd(tp->pdev, pos,
10927                                            len - pos, ptr);
10928                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
10929                                 cnt = 0;
10930                         else if (cnt < 0)
10931                                 goto error;
10932                 }
10933                 if (pos != len)
10934                         goto error;
10935         }
10936
10937         *vpdlen = len;
10938
10939         return buf;
10940
10941 error:
10942         kfree(buf);
10943         return NULL;
10944 }
10945
10946 #define NVRAM_TEST_SIZE 0x100
10947 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
10948 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
10949 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
10950 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
10951 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
10952 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
10953 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10954 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10955
10956 static int tg3_test_nvram(struct tg3 *tp)
10957 {
10958         u32 csum, magic, len;
10959         __be32 *buf;
10960         int i, j, k, err = 0, size;
10961
10962         if (tg3_flag(tp, NO_NVRAM))
10963                 return 0;
10964
10965         if (tg3_nvram_read(tp, 0, &magic) != 0)
10966                 return -EIO;
10967
10968         if (magic == TG3_EEPROM_MAGIC)
10969                 size = NVRAM_TEST_SIZE;
10970         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10971                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10972                     TG3_EEPROM_SB_FORMAT_1) {
10973                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10974                         case TG3_EEPROM_SB_REVISION_0:
10975                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10976                                 break;
10977                         case TG3_EEPROM_SB_REVISION_2:
10978                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10979                                 break;
10980                         case TG3_EEPROM_SB_REVISION_3:
10981                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10982                                 break;
10983                         case TG3_EEPROM_SB_REVISION_4:
10984                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
10985                                 break;
10986                         case TG3_EEPROM_SB_REVISION_5:
10987                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
10988                                 break;
10989                         case TG3_EEPROM_SB_REVISION_6:
10990                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
10991                                 break;
10992                         default:
10993                                 return -EIO;
10994                         }
10995                 } else
10996                         return 0;
10997         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10998                 size = NVRAM_SELFBOOT_HW_SIZE;
10999         else
11000                 return -EIO;
11001
11002         buf = kmalloc(size, GFP_KERNEL);
11003         if (buf == NULL)
11004                 return -ENOMEM;
11005
11006         err = -EIO;
11007         for (i = 0, j = 0; i < size; i += 4, j++) {
11008                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
11009                 if (err)
11010                         break;
11011         }
11012         if (i < size)
11013                 goto out;
11014
11015         /* Selfboot format */
11016         magic = be32_to_cpu(buf[0]);
11017         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11018             TG3_EEPROM_MAGIC_FW) {
11019                 u8 *buf8 = (u8 *) buf, csum8 = 0;
11020
11021                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11022                     TG3_EEPROM_SB_REVISION_2) {
11023                         /* For rev 2, the csum doesn't include the MBA. */
11024                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11025                                 csum8 += buf8[i];
11026                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11027                                 csum8 += buf8[i];
11028                 } else {
11029                         for (i = 0; i < size; i++)
11030                                 csum8 += buf8[i];
11031                 }
11032
11033                 if (csum8 == 0) {
11034                         err = 0;
11035                         goto out;
11036                 }
11037
11038                 err = -EIO;
11039                 goto out;
11040         }
11041
11042         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11043             TG3_EEPROM_MAGIC_HW) {
11044                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11045                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11046                 u8 *buf8 = (u8 *) buf;
11047
11048                 /* Separate the parity bits and the data bytes.  */
11049                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11050                         if ((i == 0) || (i == 8)) {
11051                                 int l;
11052                                 u8 msk;
11053
11054                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11055                                         parity[k++] = buf8[i] & msk;
11056                                 i++;
11057                         } else if (i == 16) {
11058                                 int l;
11059                                 u8 msk;
11060
11061                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11062                                         parity[k++] = buf8[i] & msk;
11063                                 i++;
11064
11065                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11066                                         parity[k++] = buf8[i] & msk;
11067                                 i++;
11068                         }
11069                         data[j++] = buf8[i];
11070                 }
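                /* Bytes 0, 8, 16 and 17 of the selfboot image carry
                 * the 28 parity bits (7 + 7 + 6 + 8) for the other 28
                 * data bytes; the check below rejects the image unless
                 * every data byte plus its parity bit has odd weight.
                 */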
11071
11072                 err = -EIO;
11073                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11074                         u8 hw8 = hweight8(data[i]);
11075
11076                         if ((hw8 & 0x1) && parity[i])
11077                                 goto out;
11078                         else if (!(hw8 & 0x1) && !parity[i])
11079                                 goto out;
11080                 }
11081                 err = 0;
11082                 goto out;
11083         }
11084
11085         err = -EIO;
11086
11087         /* Bootstrap checksum at offset 0x10 */
11088         csum = calc_crc((unsigned char *) buf, 0x10);
11089         if (csum != le32_to_cpu(buf[0x10/4]))
11090                 goto out;
11091
11092         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11093         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11094         if (csum != le32_to_cpu(buf[0xfc/4]))
11095                 goto out;
11096
11097         kfree(buf);
11098
11099         buf = tg3_vpd_readblock(tp, &len);
11100         if (!buf)
11101                 return -ENOMEM;
11102
11103         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11104         if (i > 0) {
11105                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11106                 if (j < 0)
11107                         goto out;
11108
11109                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11110                         goto out;
11111
11112                 i += PCI_VPD_LRDT_TAG_SIZE;
11113                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11114                                               PCI_VPD_RO_KEYWORD_CHKSUM);
11115                 if (j > 0) {
11116                         u8 csum8 = 0;
11117
11118                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
11119
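                        /* Per the PCI VPD spec, the RV checksum byte
                         * makes all bytes from the start of the VPD
                         * data up to and including the checksum sum
                         * to zero (mod 256).
                         */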
11120                         for (i = 0; i <= j; i++)
11121                                 csum8 += ((u8 *)buf)[i];
11122
11123                         if (csum8)
11124                                 goto out;
11125                 }
11126         }
11127
11128         err = 0;
11129
11130 out:
11131         kfree(buf);
11132         return err;
11133 }
11134
11135 #define TG3_SERDES_TIMEOUT_SEC  2
11136 #define TG3_COPPER_TIMEOUT_SEC  6
11137
11138 static int tg3_test_link(struct tg3 *tp)
11139 {
11140         int i, max;
11141
11142         if (!netif_running(tp->dev))
11143                 return -ENODEV;
11144
11145         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11146                 max = TG3_SERDES_TIMEOUT_SEC;
11147         else
11148                 max = TG3_COPPER_TIMEOUT_SEC;
11149
11150         for (i = 0; i < max; i++) {
11151                 if (netif_carrier_ok(tp->dev))
11152                         return 0;
11153
11154                 if (msleep_interruptible(1000))
11155                         break;
11156         }
11157
11158         return -EIO;
11159 }
11160
11161 /* Only test the commonly used registers */
11162 static int tg3_test_registers(struct tg3 *tp)
11163 {
11164         int i, is_5705, is_5750;
11165         u32 offset, read_mask, write_mask, val, save_val, read_val;
11166         static struct {
11167                 u16 offset;
11168                 u16 flags;
11169 #define TG3_FL_5705     0x1
11170 #define TG3_FL_NOT_5705 0x2
11171 #define TG3_FL_NOT_5788 0x4
11172 #define TG3_FL_NOT_5750 0x8
11173                 u32 read_mask;
11174                 u32 write_mask;
11175         } reg_tbl[] = {
11176                 /* MAC Control Registers */
11177                 { MAC_MODE, TG3_FL_NOT_5705,
11178                         0x00000000, 0x00ef6f8c },
11179                 { MAC_MODE, TG3_FL_5705,
11180                         0x00000000, 0x01ef6b8c },
11181                 { MAC_STATUS, TG3_FL_NOT_5705,
11182                         0x03800107, 0x00000000 },
11183                 { MAC_STATUS, TG3_FL_5705,
11184                         0x03800100, 0x00000000 },
11185                 { MAC_ADDR_0_HIGH, 0x0000,
11186                         0x00000000, 0x0000ffff },
11187                 { MAC_ADDR_0_LOW, 0x0000,
11188                         0x00000000, 0xffffffff },
11189                 { MAC_RX_MTU_SIZE, 0x0000,
11190                         0x00000000, 0x0000ffff },
11191                 { MAC_TX_MODE, 0x0000,
11192                         0x00000000, 0x00000070 },
11193                 { MAC_TX_LENGTHS, 0x0000,
11194                         0x00000000, 0x00003fff },
11195                 { MAC_RX_MODE, TG3_FL_NOT_5705,
11196                         0x00000000, 0x000007fc },
11197                 { MAC_RX_MODE, TG3_FL_5705,
11198                         0x00000000, 0x000007dc },
11199                 { MAC_HASH_REG_0, 0x0000,
11200                         0x00000000, 0xffffffff },
11201                 { MAC_HASH_REG_1, 0x0000,
11202                         0x00000000, 0xffffffff },
11203                 { MAC_HASH_REG_2, 0x0000,
11204                         0x00000000, 0xffffffff },
11205                 { MAC_HASH_REG_3, 0x0000,
11206                         0x00000000, 0xffffffff },
11207
11208                 /* Receive Data and Receive BD Initiator Control Registers. */
11209                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11210                         0x00000000, 0xffffffff },
11211                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11212                         0x00000000, 0xffffffff },
11213                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11214                         0x00000000, 0x00000003 },
11215                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11216                         0x00000000, 0xffffffff },
11217                 { RCVDBDI_STD_BD+0, 0x0000,
11218                         0x00000000, 0xffffffff },
11219                 { RCVDBDI_STD_BD+4, 0x0000,
11220                         0x00000000, 0xffffffff },
11221                 { RCVDBDI_STD_BD+8, 0x0000,
11222                         0x00000000, 0xffff0002 },
11223                 { RCVDBDI_STD_BD+0xc, 0x0000,
11224                         0x00000000, 0xffffffff },
11225
11226                 /* Receive BD Initiator Control Registers. */
11227                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11228                         0x00000000, 0xffffffff },
11229                 { RCVBDI_STD_THRESH, TG3_FL_5705,
11230                         0x00000000, 0x000003ff },
11231                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11232                         0x00000000, 0xffffffff },
11233
11234                 /* Host Coalescing Control Registers. */
11235                 { HOSTCC_MODE, TG3_FL_NOT_5705,
11236                         0x00000000, 0x00000004 },
11237                 { HOSTCC_MODE, TG3_FL_5705,
11238                         0x00000000, 0x000000f6 },
11239                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11240                         0x00000000, 0xffffffff },
11241                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11242                         0x00000000, 0x000003ff },
11243                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11244                         0x00000000, 0xffffffff },
11245                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11246                         0x00000000, 0x000003ff },
11247                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11248                         0x00000000, 0xffffffff },
11249                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11250                         0x00000000, 0x000000ff },
11251                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11252                         0x00000000, 0xffffffff },
11253                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11254                         0x00000000, 0x000000ff },
11255                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11256                         0x00000000, 0xffffffff },
11257                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11258                         0x00000000, 0xffffffff },
11259                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11260                         0x00000000, 0xffffffff },
11261                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11262                         0x00000000, 0x000000ff },
11263                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11264                         0x00000000, 0xffffffff },
11265                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11266                         0x00000000, 0x000000ff },
11267                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11268                         0x00000000, 0xffffffff },
11269                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11270                         0x00000000, 0xffffffff },
11271                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11272                         0x00000000, 0xffffffff },
11273                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11274                         0x00000000, 0xffffffff },
11275                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11276                         0x00000000, 0xffffffff },
11277                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11278                         0xffffffff, 0x00000000 },
11279                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11280                         0xffffffff, 0x00000000 },
11281
11282                 /* Buffer Manager Control Registers. */
11283                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11284                         0x00000000, 0x007fff80 },
11285                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11286                         0x00000000, 0x007fffff },
11287                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11288                         0x00000000, 0x0000003f },
11289                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11290                         0x00000000, 0x000001ff },
11291                 { BUFMGR_MB_HIGH_WATER, 0x0000,
11292                         0x00000000, 0x000001ff },
11293                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11294                         0xffffffff, 0x00000000 },
11295                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11296                         0xffffffff, 0x00000000 },
11297
11298                 /* Mailbox Registers */
11299                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11300                         0x00000000, 0x000001ff },
11301                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11302                         0x00000000, 0x000001ff },
11303                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11304                         0x00000000, 0x000007ff },
11305                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11306                         0x00000000, 0x000001ff },
11307
11308                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11309         };
11310
11311         is_5705 = is_5750 = 0;
11312         if (tg3_flag(tp, 5705_PLUS)) {
11313                 is_5705 = 1;
11314                 if (tg3_flag(tp, 5750_PLUS))
11315                         is_5750 = 1;
11316         }
11317
11318         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11319                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11320                         continue;
11321
11322                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11323                         continue;
11324
11325                 if (tg3_flag(tp, IS_5788) &&
11326                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
11327                         continue;
11328
11329                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11330                         continue;
11331
11332                 offset = (u32) reg_tbl[i].offset;
11333                 read_mask = reg_tbl[i].read_mask;
11334                 write_mask = reg_tbl[i].write_mask;
11335
11336                 /* Save the original register content */
11337                 save_val = tr32(offset);
11338
11339                 /* Determine the read-only value. */
11340                 read_val = save_val & read_mask;
11341
11342                 /* Write zero to the register, then make sure the read-only bits
11343                  * are not changed and the read/write bits are all zeros.
11344                  */
11345                 tw32(offset, 0);
11346
11347                 val = tr32(offset);
11348
11349                 /* Test the read-only and read/write bits. */
11350                 if (((val & read_mask) != read_val) || (val & write_mask))
11351                         goto out;
11352
11353                 /* Write ones to all the bits defined by read_mask and write_mask, then
11354                  * make sure the read-only bits are not changed and the
11355                  * read/write bits are all ones.
11356                  */
11357                 tw32(offset, read_mask | write_mask);
11358
11359                 val = tr32(offset);
11360
11361                 /* Test the read-only bits. */
11362                 if ((val & read_mask) != read_val)
11363                         goto out;
11364
11365                 /* Test the read/write bits. */
11366                 if ((val & write_mask) != write_mask)
11367                         goto out;
11368
11369                 tw32(offset, save_val);
11370         }
11371
11372         return 0;
11373
11374 out:
11375         if (netif_msg_hw(tp))
11376                 netdev_err(tp->dev,
11377                            "Register test failed at offset %x\n", offset);
11378         tw32(offset, save_val);
11379         return -EIO;
11380 }
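
/* Minimal sketch (illustrative, not called by the driver) of the probe
 * applied above to each table entry, assuming the same tr32()/tw32()
 * accessors: read-only bits must survive both writes untouched, while
 * read/write bits must take both all-zeros and all-ones.
 */
static inline bool tg3_probe_reg_sketch(struct tg3 *tp, u32 off, u32 rd, u32 wr)
{
        u32 save = tr32(off), ro = save & rd, val;
        bool ok;

        tw32(off, 0);                   /* drive the RW bits low */
        val = tr32(off);
        ok = ((val & rd) == ro) && !(val & wr);

        tw32(off, rd | wr);             /* drive the RW bits high */
        val = tr32(off);
        ok = ok && ((val & rd) == ro) && ((val & wr) == wr);

        tw32(off, save);                /* always restore the register */
        return ok;
}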
11381
11382 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11383 {
11384         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11385         int i;
11386         u32 j;
11387
11388         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11389                 for (j = 0; j < len; j += 4) {
11390                         u32 val;
11391
11392                         tg3_write_mem(tp, offset + j, test_pattern[i]);
11393                         tg3_read_mem(tp, offset + j, &val);
11394                         if (val != test_pattern[i])
11395                                 return -EIO;
11396                 }
11397         }
11398         return 0;
11399 }
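
/* The all-zeros and all-ones patterns above drive every bit of each
 * 32-bit word both low and high, while the mixed 0xaa55a55a pattern helps
 * catch shorts and coupling between adjacent bits; a stuck bit in the
 * tested window fails at least one read-back.
 */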
11400
11401 static int tg3_test_memory(struct tg3 *tp)
11402 {
11403         static struct mem_entry {
11404                 u32 offset;
11405                 u32 len;
11406         } mem_tbl_570x[] = {
11407                 { 0x00000000, 0x00b50},
11408                 { 0x00002000, 0x1c000},
11409                 { 0xffffffff, 0x00000}
11410         }, mem_tbl_5705[] = {
11411                 { 0x00000100, 0x0000c},
11412                 { 0x00000200, 0x00008},
11413                 { 0x00004000, 0x00800},
11414                 { 0x00006000, 0x01000},
11415                 { 0x00008000, 0x02000},
11416                 { 0x00010000, 0x0e000},
11417                 { 0xffffffff, 0x00000}
11418         }, mem_tbl_5755[] = {
11419                 { 0x00000200, 0x00008},
11420                 { 0x00004000, 0x00800},
11421                 { 0x00006000, 0x00800},
11422                 { 0x00008000, 0x02000},
11423                 { 0x00010000, 0x0c000},
11424                 { 0xffffffff, 0x00000}
11425         }, mem_tbl_5906[] = {
11426                 { 0x00000200, 0x00008},
11427                 { 0x00004000, 0x00400},
11428                 { 0x00006000, 0x00400},
11429                 { 0x00008000, 0x01000},
11430                 { 0x00010000, 0x01000},
11431                 { 0xffffffff, 0x00000}
11432         }, mem_tbl_5717[] = {
11433                 { 0x00000200, 0x00008},
11434                 { 0x00010000, 0x0a000},
11435                 { 0x00020000, 0x13c00},
11436                 { 0xffffffff, 0x00000}
11437         }, mem_tbl_57765[] = {
11438                 { 0x00000200, 0x00008},
11439                 { 0x00004000, 0x00800},
11440                 { 0x00006000, 0x09800},
11441                 { 0x00010000, 0x0a000},
11442                 { 0xffffffff, 0x00000}
11443         };
11444         struct mem_entry *mem_tbl;
11445         int err = 0;
11446         int i;
11447
11448         if (tg3_flag(tp, 5717_PLUS))
11449                 mem_tbl = mem_tbl_5717;
11450         else if (tg3_flag(tp, 57765_CLASS))
11451                 mem_tbl = mem_tbl_57765;
11452         else if (tg3_flag(tp, 5755_PLUS))
11453                 mem_tbl = mem_tbl_5755;
11454         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11455                 mem_tbl = mem_tbl_5906;
11456         else if (tg3_flag(tp, 5705_PLUS))
11457                 mem_tbl = mem_tbl_5705;
11458         else
11459                 mem_tbl = mem_tbl_570x;
11460
11461         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11462                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11463                 if (err)
11464                         break;
11465         }
11466
11467         return err;
11468 }
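
/* The internal SRAM layout differs per ASIC generation, and the *_PLUS
 * flags are cumulative (a 5717 is also 5755_PLUS and 5705_PLUS), so the
 * table selection above tests the most specific generation first.
 */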
11469
11470 #define TG3_TSO_MSS             500
11471
11472 #define TG3_TSO_IP_HDR_LEN      20
11473 #define TG3_TSO_TCP_HDR_LEN     20
11474 #define TG3_TSO_TCP_OPT_LEN     12
11475
11476 static const u8 tg3_tso_header[] = {
11477 0x08, 0x00,
11478 0x45, 0x00, 0x00, 0x00,
11479 0x00, 0x00, 0x40, 0x00,
11480 0x40, 0x06, 0x00, 0x00,
11481 0x0a, 0x00, 0x00, 0x01,
11482 0x0a, 0x00, 0x00, 0x02,
11483 0x0d, 0x00, 0xe0, 0x00,
11484 0x00, 0x00, 0x01, 0x00,
11485 0x00, 0x00, 0x02, 0x00,
11486 0x80, 0x10, 0x10, 0x00,
11487 0x14, 0x09, 0x00, 0x00,
11488 0x01, 0x01, 0x08, 0x0a,
11489 0x11, 0x11, 0x11, 0x11,
11490 0x11, 0x11, 0x11, 0x11,
11491 };
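
/* The canned frame above decodes as: EtherType 0x0800; an IPv4 header
 * with DF set, TTL 64, protocol TCP, 10.0.0.1 -> 10.0.0.2 and tot_len
 * left zero (patched in below); and a TCP header with ACK set, data
 * offset 8 (20 bytes plus a 12-byte option area: NOP, NOP, timestamps).
 */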
11492
11493 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11494 {
11495         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11496         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11497         u32 budget;
11498         struct sk_buff *skb;
11499         u8 *tx_data, *rx_data;
11500         dma_addr_t map;
11501         int num_pkts, tx_len, rx_len, i, err;
11502         struct tg3_rx_buffer_desc *desc;
11503         struct tg3_napi *tnapi, *rnapi;
11504         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11505
11506         tnapi = &tp->napi[0];
11507         rnapi = &tp->napi[0];
11508         if (tp->irq_cnt > 1) {
11509                 if (tg3_flag(tp, ENABLE_RSS))
11510                         rnapi = &tp->napi[1];
11511                 if (tg3_flag(tp, ENABLE_TSS))
11512                         tnapi = &tp->napi[1];
11513         }
11514         coal_now = tnapi->coal_now | rnapi->coal_now;
11515
11516         err = -EIO;
11517
11518         tx_len = pktsz;
11519         skb = netdev_alloc_skb(tp->dev, tx_len);
11520         if (!skb)
11521                 return -ENOMEM;
11522
11523         tx_data = skb_put(skb, tx_len);
11524         memcpy(tx_data, tp->dev->dev_addr, 6);
11525         memset(tx_data + 6, 0x0, 8);
11526
11527         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11528
11529         if (tso_loopback) {
11530                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11531
11532                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11533                               TG3_TSO_TCP_OPT_LEN;
11534
11535                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11536                        sizeof(tg3_tso_header));
11537                 mss = TG3_TSO_MSS;
11538
11539                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11540                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11541
11542                 /* Set the total length field in the IP header */
11543                 iph->tot_len = htons((u16)(mss + hdr_len));
11544
11545                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11546                               TXD_FLAG_CPU_POST_DMA);
11547
11548                 if (tg3_flag(tp, HW_TSO_1) ||
11549                     tg3_flag(tp, HW_TSO_2) ||
11550                     tg3_flag(tp, HW_TSO_3)) {
11551                         struct tcphdr *th;
11552                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11553                         th = (struct tcphdr *)&tx_data[val];
11554                         th->check = 0;
11555                 } else
11556                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
11557
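                /* Each hardware TSO generation encodes the LSO header
                 * length differently: HW_TSO_3 splits it between mss and
                 * base_flags, HW_TSO_2 packs the whole length into the
                 * upper mss bits, and the oldest parts carry only the
                 * TCP option length.
                 */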
11558                 if (tg3_flag(tp, HW_TSO_3)) {
11559                         mss |= (hdr_len & 0xc) << 12;
11560                         if (hdr_len & 0x10)
11561                                 base_flags |= 0x00000010;
11562                         base_flags |= (hdr_len & 0x3e0) << 5;
11563                 } else if (tg3_flag(tp, HW_TSO_2))
11564                         mss |= hdr_len << 9;
11565                 else if (tg3_flag(tp, HW_TSO_1) ||
11566                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11567                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11568                 } else {
11569                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11570                 }
11571
11572                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11573         } else {
11574                 num_pkts = 1;
11575                 data_off = ETH_HLEN;
11576         }
11577
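        /* The payload is a rolling 0x00-0xff counter; the receive side
         * below regenerates the same sequence starting at data_off and
         * compares byte-for-byte.
         */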
11578         for (i = data_off; i < tx_len; i++)
11579                 tx_data[i] = (u8) (i & 0xff);
11580
11581         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11582         if (pci_dma_mapping_error(tp->pdev, map)) {
11583                 dev_kfree_skb(skb);
11584                 return -EIO;
11585         }
11586
11587         val = tnapi->tx_prod;
11588         tnapi->tx_buffers[val].skb = skb;
11589         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11590
11591         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11592                rnapi->coal_now);
11593
11594         udelay(10);
11595
11596         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11597
11598         budget = tg3_tx_avail(tnapi);
11599         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11600                             base_flags | TXD_FLAG_END, mss, 0)) {
11601                 tnapi->tx_buffers[val].skb = NULL;
11602                 dev_kfree_skb(skb);
11603                 return -EIO;
11604         }
11605
11606         tnapi->tx_prod++;
11607
11608         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11609         tr32_mailbox(tnapi->prodmbox);
11610
11611         udelay(10);
11612
11613         /* Poll up to 350 usec (35 x 10 usec); slow 10/100 Mbps devices need that long. */
11614         for (i = 0; i < 35; i++) {
11615                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11616                        coal_now);
11617
11618                 udelay(10);
11619
11620                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11621                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11622                 if ((tx_idx == tnapi->tx_prod) &&
11623                     (rx_idx == (rx_start_idx + num_pkts)))
11624                         break;
11625         }
11626
11627         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
11628         dev_kfree_skb(skb);
11629
11630         if (tx_idx != tnapi->tx_prod)
11631                 goto out;
11632
11633         if (rx_idx != rx_start_idx + num_pkts)
11634                 goto out;
11635
11636         val = data_off;
11637         while (rx_idx != rx_start_idx) {
11638                 desc = &rnapi->rx_rcb[rx_start_idx++];
11639                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11640                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11641
11642                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11643                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11644                         goto out;
11645
11646                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11647                          - ETH_FCS_LEN;
11648
11649                 if (!tso_loopback) {
11650                         if (rx_len != tx_len)
11651                                 goto out;
11652
11653                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11654                                 if (opaque_key != RXD_OPAQUE_RING_STD)
11655                                         goto out;
11656                         } else {
11657                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11658                                         goto out;
11659                         }
11660                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11661                            ((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11662                             >> RXD_TCPCSUM_SHIFT) != 0xffff) {
11663                         goto out;
11664                 }
11665
11666                 if (opaque_key == RXD_OPAQUE_RING_STD) {
11667                         rx_data = tpr->rx_std_buffers[desc_idx].data;
11668                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11669                                              mapping);
11670                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11671                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
11672                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11673                                              mapping);
11674                 } else
11675                         goto out;
11676
11677                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11678                                             PCI_DMA_FROMDEVICE);
11679
11680                 rx_data += TG3_RX_OFFSET(tp);
11681                 for (i = data_off; i < rx_len; i++, val++) {
11682                         if (*(rx_data + i) != (u8) (val & 0xff))
11683                                 goto out;
11684                 }
11685         }
11686
11687         err = 0;
11688
11689         /* tg3_free_rings will unmap and free the rx_data */
11690 out:
11691         return err;
11692 }
11693
11694 #define TG3_STD_LOOPBACK_FAILED         1
11695 #define TG3_JMB_LOOPBACK_FAILED         2
11696 #define TG3_TSO_LOOPBACK_FAILED         4
11697 #define TG3_LOOPBACK_FAILED \
11698         (TG3_STD_LOOPBACK_FAILED | \
11699          TG3_JMB_LOOPBACK_FAILED | \
11700          TG3_TSO_LOOPBACK_FAILED)
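
/* tg3_test_loopback() reports one bitmask of the *_FAILED flags above per
 * mode: data[0] for MAC loopback, data[1] for internal PHY loopback and,
 * when requested, data[2] for external loopback.
 */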
11701
11702 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
11703 {
11704         int err = -EIO;
11705         u32 eee_cap;
11706
11707         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11708         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11709
11710         if (!netif_running(tp->dev)) {
11711                 data[0] = TG3_LOOPBACK_FAILED;
11712                 data[1] = TG3_LOOPBACK_FAILED;
11713                 if (do_extlpbk)
11714                         data[2] = TG3_LOOPBACK_FAILED;
11715                 goto done;
11716         }
11717
11718         err = tg3_reset_hw(tp, 1);
11719         if (err) {
11720                 data[0] = TG3_LOOPBACK_FAILED;
11721                 data[1] = TG3_LOOPBACK_FAILED;
11722                 if (do_extlpbk)
11723                         data[2] = TG3_LOOPBACK_FAILED;
11724                 goto done;
11725         }
11726
11727         if (tg3_flag(tp, ENABLE_RSS)) {
11728                 int i;
11729
11730                 /* Reroute all rx packets to the 1st queue */
11731                 for (i = MAC_RSS_INDIR_TBL_0;
11732                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11733                         tw32(i, 0x0);
11734         }
11735
11736         /* HW erratum: MAC loopback fails in some cases on the 5780.
11737          * Normal traffic and PHY loopback are not affected by this
11738          * erratum.  The MAC loopback test is also deprecated for all
11739          * newer ASIC revisions.
11740          */
11741         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
11742             !tg3_flag(tp, CPMU_PRESENT)) {
11743                 tg3_mac_loopback(tp, true);
11744
11745                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11746                         data[0] |= TG3_STD_LOOPBACK_FAILED;
11747
11748                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11749                     tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11750                         data[0] |= TG3_JMB_LOOPBACK_FAILED;
11751
11752                 tg3_mac_loopback(tp, false);
11753         }
11754
11755         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11756             !tg3_flag(tp, USE_PHYLIB)) {
11757                 int i;
11758
11759                 tg3_phy_lpbk_set(tp, 0, false);
11760
11761                 /* Wait for link */
11762                 for (i = 0; i < 100; i++) {
11763                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11764                                 break;
11765                         mdelay(1);
11766                 }
11767
11768                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11769                         data[1] |= TG3_STD_LOOPBACK_FAILED;
11770                 if (tg3_flag(tp, TSO_CAPABLE) &&
11771                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11772                         data[1] |= TG3_TSO_LOOPBACK_FAILED;
11773                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11774                     tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11775                         data[1] |= TG3_JMB_LOOPBACK_FAILED;
11776
11777                 if (do_extlpbk) {
11778                         tg3_phy_lpbk_set(tp, 0, true);
11779
11780                         /* All link indications report up, but the hardware
11781                          * isn't really ready for about 20 msec.  Double it
11782                          * to be sure.
11783                          */
11784                         mdelay(40);
11785
11786                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11787                                 data[2] |= TG3_STD_LOOPBACK_FAILED;
11788                         if (tg3_flag(tp, TSO_CAPABLE) &&
11789                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11790                                 data[2] |= TG3_TSO_LOOPBACK_FAILED;
11791                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11792                             tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
11793                                 data[2] |= TG3_JMB_LOOPBACK_FAILED;
11794                 }
11795
11796                 /* Re-enable gphy autopowerdown. */
11797                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11798                         tg3_phy_toggle_apd(tp, true);
11799         }
11800
11801         err = (data[0] | data[1] | data[2]) ? -EIO : 0;
11802
11803 done:
11804         tp->phy_flags |= eee_cap;
11805
11806         return err;
11807 }
11808
11809 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11810                           u64 *data)
11811 {
11812         struct tg3 *tp = netdev_priv(dev);
11813         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
11814
11815         if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
11816             tg3_power_up(tp)) {
11817                 etest->flags |= ETH_TEST_FL_FAILED;
11818                 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
11819                 return;
11820         }
11821
11822         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11823
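        /* ethtool results, in order: data[0] NVRAM, data[1] link,
         * data[2] registers, data[3] memory, data[4]-data[6] the three
         * loopback masks, data[7] interrupt test.
         */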
11824         if (tg3_test_nvram(tp) != 0) {
11825                 etest->flags |= ETH_TEST_FL_FAILED;
11826                 data[0] = 1;
11827         }
11828         if (!doextlpbk && tg3_test_link(tp)) {
11829                 etest->flags |= ETH_TEST_FL_FAILED;
11830                 data[1] = 1;
11831         }
11832         if (etest->flags & ETH_TEST_FL_OFFLINE) {
11833                 int err, err2 = 0, irq_sync = 0;
11834
11835                 if (netif_running(dev)) {
11836                         tg3_phy_stop(tp);
11837                         tg3_netif_stop(tp);
11838                         irq_sync = 1;
11839                 }
11840
11841                 tg3_full_lock(tp, irq_sync);
11842
11843                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11844                 err = tg3_nvram_lock(tp);
11845                 tg3_halt_cpu(tp, RX_CPU_BASE);
11846                 if (!tg3_flag(tp, 5705_PLUS))
11847                         tg3_halt_cpu(tp, TX_CPU_BASE);
11848                 if (!err)
11849                         tg3_nvram_unlock(tp);
11850
11851                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11852                         tg3_phy_reset(tp);
11853
11854                 if (tg3_test_registers(tp) != 0) {
11855                         etest->flags |= ETH_TEST_FL_FAILED;
11856                         data[2] = 1;
11857                 }
11858
11859                 if (tg3_test_memory(tp) != 0) {
11860                         etest->flags |= ETH_TEST_FL_FAILED;
11861                         data[3] = 1;
11862                 }
11863
11864                 if (doextlpbk)
11865                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
11866
11867                 if (tg3_test_loopback(tp, &data[4], doextlpbk))
11868                         etest->flags |= ETH_TEST_FL_FAILED;
11869
11870                 tg3_full_unlock(tp);
11871
11872                 if (tg3_test_interrupt(tp) != 0) {
11873                         etest->flags |= ETH_TEST_FL_FAILED;
11874                         data[7] = 1;
11875                 }
11876
11877                 tg3_full_lock(tp, 0);
11878
11879                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11880                 if (netif_running(dev)) {
11881                         tg3_flag_set(tp, INIT_COMPLETE);
11882                         err2 = tg3_restart_hw(tp, 1);
11883                         if (!err2)
11884                                 tg3_netif_start(tp);
11885                 }
11886
11887                 tg3_full_unlock(tp);
11888
11889                 if (irq_sync && !err2)
11890                         tg3_phy_start(tp);
11891         }
11892         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11893                 tg3_power_down(tp);
11894
11895 }
11896
11897 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11898 {
11899         struct mii_ioctl_data *data = if_mii(ifr);
11900         struct tg3 *tp = netdev_priv(dev);
11901         int err;
11902
11903         if (tg3_flag(tp, USE_PHYLIB)) {
11904                 struct phy_device *phydev;
11905                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11906                         return -EAGAIN;
11907                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11908                 return phy_mii_ioctl(phydev, ifr, cmd);
11909         }
11910
11911         switch (cmd) {
11912         case SIOCGMIIPHY:
11913                 data->phy_id = tp->phy_addr;
11914
11915                 /* fall through */
11916         case SIOCGMIIREG: {
11917                 u32 mii_regval;
11918
11919                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11920                         break;                  /* We have no PHY */
11921
11922                 if (!netif_running(dev))
11923                         return -EAGAIN;
11924
11925                 spin_lock_bh(&tp->lock);
11926                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11927                 spin_unlock_bh(&tp->lock);
11928
11929                 data->val_out = mii_regval;
11930
11931                 return err;
11932         }
11933
11934         case SIOCSMIIREG:
11935                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11936                         break;                  /* We have no PHY */
11937
11938                 if (!netif_running(dev))
11939                         return -EAGAIN;
11940
11941                 spin_lock_bh(&tp->lock);
11942                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11943                 spin_unlock_bh(&tp->lock);
11944
11945                 return err;
11946
11947         default:
11948                 /* do nothing */
11949                 break;
11950         }
11951         return -EOPNOTSUPP;
11952 }
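
/* SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG are the classic MII ioctls used by
 * tools such as mii-tool; when phylib owns the PHY they are forwarded to
 * phy_mii_ioctl() instead of touching the registers directly.
 */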
11953
11954 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11955 {
11956         struct tg3 *tp = netdev_priv(dev);
11957
11958         memcpy(ec, &tp->coal, sizeof(*ec));
11959         return 0;
11960 }
11961
11962 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11963 {
11964         struct tg3 *tp = netdev_priv(dev);
11965         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11966         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11967
11968         if (!tg3_flag(tp, 5705_PLUS)) {
11969                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11970                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11971                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11972                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11973         }
11974
11975         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11976             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11977             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11978             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11979             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11980             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11981             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11982             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11983             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11984             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11985                 return -EINVAL;
11986
11987         /* No rx interrupts will be generated if both are zero */
11988         if ((ec->rx_coalesce_usecs == 0) &&
11989             (ec->rx_max_coalesced_frames == 0))
11990                 return -EINVAL;
11991
11992         /* No tx interrupts will be generated if both are zero */
11993         if ((ec->tx_coalesce_usecs == 0) &&
11994             (ec->tx_max_coalesced_frames == 0))
11995                 return -EINVAL;
11996
11997         /* Only copy relevant parameters, ignore all others. */
11998         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11999         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
12000         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
12001         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
12002         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12003         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12004         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12005         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12006         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12007
12008         if (netif_running(dev)) {
12009                 tg3_full_lock(tp, 0);
12010                 __tg3_set_coalesce(tp, &tp->coal);
12011                 tg3_full_unlock(tp);
12012         }
12013         return 0;
12014 }
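
/* From userspace these map straight onto ethtool -C, e.g.
 * "ethtool -C eth0 rx-usecs 20 rx-frames 5" sets rx_coalesce_usecs and
 * rx_max_coalesced_frames above; per direction at least one of the two
 * must stay non-zero or the checks above return -EINVAL.
 */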
12015
12016 static const struct ethtool_ops tg3_ethtool_ops = {
12017         .get_settings           = tg3_get_settings,
12018         .set_settings           = tg3_set_settings,
12019         .get_drvinfo            = tg3_get_drvinfo,
12020         .get_regs_len           = tg3_get_regs_len,
12021         .get_regs               = tg3_get_regs,
12022         .get_wol                = tg3_get_wol,
12023         .set_wol                = tg3_set_wol,
12024         .get_msglevel           = tg3_get_msglevel,
12025         .set_msglevel           = tg3_set_msglevel,
12026         .nway_reset             = tg3_nway_reset,
12027         .get_link               = ethtool_op_get_link,
12028         .get_eeprom_len         = tg3_get_eeprom_len,
12029         .get_eeprom             = tg3_get_eeprom,
12030         .set_eeprom             = tg3_set_eeprom,
12031         .get_ringparam          = tg3_get_ringparam,
12032         .set_ringparam          = tg3_set_ringparam,
12033         .get_pauseparam         = tg3_get_pauseparam,
12034         .set_pauseparam         = tg3_set_pauseparam,
12035         .self_test              = tg3_self_test,
12036         .get_strings            = tg3_get_strings,
12037         .set_phys_id            = tg3_set_phys_id,
12038         .get_ethtool_stats      = tg3_get_ethtool_stats,
12039         .get_coalesce           = tg3_get_coalesce,
12040         .set_coalesce           = tg3_set_coalesce,
12041         .get_sset_count         = tg3_get_sset_count,
12042         .get_rxnfc              = tg3_get_rxnfc,
12043         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
12044         .get_rxfh_indir         = tg3_get_rxfh_indir,
12045         .set_rxfh_indir         = tg3_set_rxfh_indir,
12046 };
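
/* The ethtool core dispatches through this table, e.g. "ethtool -t eth0"
 * lands in tg3_self_test() above and "ethtool -C" in tg3_set_coalesce().
 */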
12047
12048 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12049 {
12050         u32 cursize, val, magic;
12051
12052         tp->nvram_size = EEPROM_CHIP_SIZE;
12053
12054         if (tg3_nvram_read(tp, 0, &magic) != 0)
12055                 return;
12056
12057         if ((magic != TG3_EEPROM_MAGIC) &&
12058             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12059             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12060                 return;
12061
12062         /*
12063          * Size the chip by reading offsets at increasing powers of two.
12064          * When we encounter our validation signature, we know the addressing
12065          * has wrapped around, and thus have our chip size.
12066          */
12067         cursize = 0x10;
12068
12069         while (cursize < tp->nvram_size) {
12070                 if (tg3_nvram_read(tp, cursize, &val) != 0)
12071                         return;
12072
12073                 if (val == magic)
12074                         break;
12075
12076                 cursize <<= 1;
12077         }
12078
12079         tp->nvram_size = cursize;
12080 }
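
/* For instance, a 16 KB part answers the reads at 0x10, 0x20, ... with
 * ordinary data until the read at 0x4000 wraps back to offset 0 and
 * returns the magic value, leaving tp->nvram_size = 0x4000.
 */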
12081
12082 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12083 {
12084         u32 val;
12085
12086         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12087                 return;
12088
12089         /* Selfboot format */
12090         if (val != TG3_EEPROM_MAGIC) {
12091                 tg3_get_eeprom_size(tp);
12092                 return;
12093         }
12094
12095         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12096                 if (val != 0) {
12097                         /* This is confusing.  We want to operate on the
12098                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
12099                          * call will read from NVRAM and byteswap the data
12100                          * according to the byteswapping settings for all
12101                          * other register accesses.  This ensures the data we
12102                          * want will always reside in the lower 16-bits.
12103                          * However, the data in NVRAM is in LE format, which
12104                          * means the data from the NVRAM read will always be
12105                          * opposite the endianness of the CPU.  The 16-bit
12106                          * byteswap then brings the data to CPU endianness.
12107                          */
12108                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12109                         return;
12110                 }
12111         }
12112         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12113 }
12114
12115 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12116 {
12117         u32 nvcfg1;
12118
12119         nvcfg1 = tr32(NVRAM_CFG1);
12120         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12121                 tg3_flag_set(tp, FLASH);
12122         } else {
12123                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12124                 tw32(NVRAM_CFG1, nvcfg1);
12125         }
12126
12127         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12128             tg3_flag(tp, 5780_CLASS)) {
12129                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12130                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12131                         tp->nvram_jedecnum = JEDEC_ATMEL;
12132                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12133                         tg3_flag_set(tp, NVRAM_BUFFERED);
12134                         break;
12135                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12136                         tp->nvram_jedecnum = JEDEC_ATMEL;
12137                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12138                         break;
12139                 case FLASH_VENDOR_ATMEL_EEPROM:
12140                         tp->nvram_jedecnum = JEDEC_ATMEL;
12141                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12142                         tg3_flag_set(tp, NVRAM_BUFFERED);
12143                         break;
12144                 case FLASH_VENDOR_ST:
12145                         tp->nvram_jedecnum = JEDEC_ST;
12146                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12147                         tg3_flag_set(tp, NVRAM_BUFFERED);
12148                         break;
12149                 case FLASH_VENDOR_SAIFUN:
12150                         tp->nvram_jedecnum = JEDEC_SAIFUN;
12151                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12152                         break;
12153                 case FLASH_VENDOR_SST_SMALL:
12154                 case FLASH_VENDOR_SST_LARGE:
12155                         tp->nvram_jedecnum = JEDEC_SST;
12156                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12157                         break;
12158                 }
12159         } else {
12160                 tp->nvram_jedecnum = JEDEC_ATMEL;
12161                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12162                 tg3_flag_set(tp, NVRAM_BUFFERED);
12163         }
12164 }
12165
12166 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12167 {
12168         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12169         case FLASH_5752PAGE_SIZE_256:
12170                 tp->nvram_pagesize = 256;
12171                 break;
12172         case FLASH_5752PAGE_SIZE_512:
12173                 tp->nvram_pagesize = 512;
12174                 break;
12175         case FLASH_5752PAGE_SIZE_1K:
12176                 tp->nvram_pagesize = 1024;
12177                 break;
12178         case FLASH_5752PAGE_SIZE_2K:
12179                 tp->nvram_pagesize = 2048;
12180                 break;
12181         case FLASH_5752PAGE_SIZE_4K:
12182                 tp->nvram_pagesize = 4096;
12183                 break;
12184         case FLASH_5752PAGE_SIZE_264:
12185                 tp->nvram_pagesize = 264;
12186                 break;
12187         case FLASH_5752PAGE_SIZE_528:
12188                 tp->nvram_pagesize = 528;
12189                 break;
12190         }
12191 }
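
/* 264- and 528-byte pages are Atmel DataFlash geometries ("power of two
 * plus 8"); the callers below enable NVRAM address translation only for
 * those, since DataFlash parts take a page/byte split address.
 */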
12192
12193 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12194 {
12195         u32 nvcfg1;
12196
12197         nvcfg1 = tr32(NVRAM_CFG1);
12198
12199         /* NVRAM protection for TPM */
12200         if (nvcfg1 & (1 << 27))
12201                 tg3_flag_set(tp, PROTECTED_NVRAM);
12202
12203         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12204         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12205         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12206                 tp->nvram_jedecnum = JEDEC_ATMEL;
12207                 tg3_flag_set(tp, NVRAM_BUFFERED);
12208                 break;
12209         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12210                 tp->nvram_jedecnum = JEDEC_ATMEL;
12211                 tg3_flag_set(tp, NVRAM_BUFFERED);
12212                 tg3_flag_set(tp, FLASH);
12213                 break;
12214         case FLASH_5752VENDOR_ST_M45PE10:
12215         case FLASH_5752VENDOR_ST_M45PE20:
12216         case FLASH_5752VENDOR_ST_M45PE40:
12217                 tp->nvram_jedecnum = JEDEC_ST;
12218                 tg3_flag_set(tp, NVRAM_BUFFERED);
12219                 tg3_flag_set(tp, FLASH);
12220                 break;
12221         }
12222
12223         if (tg3_flag(tp, FLASH)) {
12224                 tg3_nvram_get_pagesize(tp, nvcfg1);
12225         } else {
12226                 /* For EEPROMs, set the page size to the maximum EEPROM size. */
12227                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12228
12229                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12230                 tw32(NVRAM_CFG1, nvcfg1);
12231         }
12232 }
12233
12234 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12235 {
12236         u32 nvcfg1, protect = 0;
12237
12238         nvcfg1 = tr32(NVRAM_CFG1);
12239
12240         /* NVRAM protection for TPM */
12241         if (nvcfg1 & (1 << 27)) {
12242                 tg3_flag_set(tp, PROTECTED_NVRAM);
12243                 protect = 1;
12244         }
12245
12246         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12247         switch (nvcfg1) {
12248         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12249         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12250         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12251         case FLASH_5755VENDOR_ATMEL_FLASH_5:
12252                 tp->nvram_jedecnum = JEDEC_ATMEL;
12253                 tg3_flag_set(tp, NVRAM_BUFFERED);
12254                 tg3_flag_set(tp, FLASH);
12255                 tp->nvram_pagesize = 264;
12256                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12257                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12258                         tp->nvram_size = (protect ? 0x3e200 :
12259                                           TG3_NVRAM_SIZE_512KB);
12260                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12261                         tp->nvram_size = (protect ? 0x1f200 :
12262                                           TG3_NVRAM_SIZE_256KB);
12263                 else
12264                         tp->nvram_size = (protect ? 0x1f200 :
12265                                           TG3_NVRAM_SIZE_128KB);
12266                 break;
12267         case FLASH_5752VENDOR_ST_M45PE10:
12268         case FLASH_5752VENDOR_ST_M45PE20:
12269         case FLASH_5752VENDOR_ST_M45PE40:
12270                 tp->nvram_jedecnum = JEDEC_ST;
12271                 tg3_flag_set(tp, NVRAM_BUFFERED);
12272                 tg3_flag_set(tp, FLASH);
12273                 tp->nvram_pagesize = 256;
12274                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12275                         tp->nvram_size = (protect ?
12276                                           TG3_NVRAM_SIZE_64KB :
12277                                           TG3_NVRAM_SIZE_128KB);
12278                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12279                         tp->nvram_size = (protect ?
12280                                           TG3_NVRAM_SIZE_64KB :
12281                                           TG3_NVRAM_SIZE_256KB);
12282                 else
12283                         tp->nvram_size = (protect ?
12284                                           TG3_NVRAM_SIZE_128KB :
12285                                           TG3_NVRAM_SIZE_512KB);
12286                 break;
12287         }
12288 }
12289
12290 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12291 {
12292         u32 nvcfg1;
12293
12294         nvcfg1 = tr32(NVRAM_CFG1);
12295
12296         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12297         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12298         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12299         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12300         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12301                 tp->nvram_jedecnum = JEDEC_ATMEL;
12302                 tg3_flag_set(tp, NVRAM_BUFFERED);
12303                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12304
12305                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12306                 tw32(NVRAM_CFG1, nvcfg1);
12307                 break;
12308         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12309         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12310         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12311         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12312                 tp->nvram_jedecnum = JEDEC_ATMEL;
12313                 tg3_flag_set(tp, NVRAM_BUFFERED);
12314                 tg3_flag_set(tp, FLASH);
12315                 tp->nvram_pagesize = 264;
12316                 break;
12317         case FLASH_5752VENDOR_ST_M45PE10:
12318         case FLASH_5752VENDOR_ST_M45PE20:
12319         case FLASH_5752VENDOR_ST_M45PE40:
12320                 tp->nvram_jedecnum = JEDEC_ST;
12321                 tg3_flag_set(tp, NVRAM_BUFFERED);
12322                 tg3_flag_set(tp, FLASH);
12323                 tp->nvram_pagesize = 256;
12324                 break;
12325         }
12326 }
12327
12328 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12329 {
12330         u32 nvcfg1, protect = 0;
12331
12332         nvcfg1 = tr32(NVRAM_CFG1);
12333
12334         /* NVRAM protection for TPM */
12335         if (nvcfg1 & (1 << 27)) {
12336                 tg3_flag_set(tp, PROTECTED_NVRAM);
12337                 protect = 1;
12338         }
12339
12340         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12341         switch (nvcfg1) {
12342         case FLASH_5761VENDOR_ATMEL_ADB021D:
12343         case FLASH_5761VENDOR_ATMEL_ADB041D:
12344         case FLASH_5761VENDOR_ATMEL_ADB081D:
12345         case FLASH_5761VENDOR_ATMEL_ADB161D:
12346         case FLASH_5761VENDOR_ATMEL_MDB021D:
12347         case FLASH_5761VENDOR_ATMEL_MDB041D:
12348         case FLASH_5761VENDOR_ATMEL_MDB081D:
12349         case FLASH_5761VENDOR_ATMEL_MDB161D:
12350                 tp->nvram_jedecnum = JEDEC_ATMEL;
12351                 tg3_flag_set(tp, NVRAM_BUFFERED);
12352                 tg3_flag_set(tp, FLASH);
12353                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12354                 tp->nvram_pagesize = 256;
12355                 break;
12356         case FLASH_5761VENDOR_ST_A_M45PE20:
12357         case FLASH_5761VENDOR_ST_A_M45PE40:
12358         case FLASH_5761VENDOR_ST_A_M45PE80:
12359         case FLASH_5761VENDOR_ST_A_M45PE16:
12360         case FLASH_5761VENDOR_ST_M_M45PE20:
12361         case FLASH_5761VENDOR_ST_M_M45PE40:
12362         case FLASH_5761VENDOR_ST_M_M45PE80:
12363         case FLASH_5761VENDOR_ST_M_M45PE16:
12364                 tp->nvram_jedecnum = JEDEC_ST;
12365                 tg3_flag_set(tp, NVRAM_BUFFERED);
12366                 tg3_flag_set(tp, FLASH);
12367                 tp->nvram_pagesize = 256;
12368                 break;
12369         }
12370
12371         if (protect) {
12372                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12373         } else {
12374                 switch (nvcfg1) {
12375                 case FLASH_5761VENDOR_ATMEL_ADB161D:
12376                 case FLASH_5761VENDOR_ATMEL_MDB161D:
12377                 case FLASH_5761VENDOR_ST_A_M45PE16:
12378                 case FLASH_5761VENDOR_ST_M_M45PE16:
12379                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12380                         break;
12381                 case FLASH_5761VENDOR_ATMEL_ADB081D:
12382                 case FLASH_5761VENDOR_ATMEL_MDB081D:
12383                 case FLASH_5761VENDOR_ST_A_M45PE80:
12384                 case FLASH_5761VENDOR_ST_M_M45PE80:
12385                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12386                         break;
12387                 case FLASH_5761VENDOR_ATMEL_ADB041D:
12388                 case FLASH_5761VENDOR_ATMEL_MDB041D:
12389                 case FLASH_5761VENDOR_ST_A_M45PE40:
12390                 case FLASH_5761VENDOR_ST_M_M45PE40:
12391                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12392                         break;
12393                 case FLASH_5761VENDOR_ATMEL_ADB021D:
12394                 case FLASH_5761VENDOR_ATMEL_MDB021D:
12395                 case FLASH_5761VENDOR_ST_A_M45PE20:
12396                 case FLASH_5761VENDOR_ST_M_M45PE20:
12397                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12398                         break;
12399                 }
12400         }
12401 }
12402
12403 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12404 {
12405         tp->nvram_jedecnum = JEDEC_ATMEL;
12406         tg3_flag_set(tp, NVRAM_BUFFERED);
12407         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12408 }
12409
12410 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12411 {
12412         u32 nvcfg1;
12413
12414         nvcfg1 = tr32(NVRAM_CFG1);
12415
12416         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12417         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12418         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12419                 tp->nvram_jedecnum = JEDEC_ATMEL;
12420                 tg3_flag_set(tp, NVRAM_BUFFERED);
12421                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12422
12423                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12424                 tw32(NVRAM_CFG1, nvcfg1);
12425                 return;
12426         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12427         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12428         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12429         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12430         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12431         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12432         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12433                 tp->nvram_jedecnum = JEDEC_ATMEL;
12434                 tg3_flag_set(tp, NVRAM_BUFFERED);
12435                 tg3_flag_set(tp, FLASH);
12436
12437                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12438                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12439                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12440                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12441                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12442                         break;
12443                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12444                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12445                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12446                         break;
12447                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12448                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12449                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12450                         break;
12451                 }
12452                 break;
12453         case FLASH_5752VENDOR_ST_M45PE10:
12454         case FLASH_5752VENDOR_ST_M45PE20:
12455         case FLASH_5752VENDOR_ST_M45PE40:
12456                 tp->nvram_jedecnum = JEDEC_ST;
12457                 tg3_flag_set(tp, NVRAM_BUFFERED);
12458                 tg3_flag_set(tp, FLASH);
12459
12460                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12461                 case FLASH_5752VENDOR_ST_M45PE10:
12462                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12463                         break;
12464                 case FLASH_5752VENDOR_ST_M45PE20:
12465                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12466                         break;
12467                 case FLASH_5752VENDOR_ST_M45PE40:
12468                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12469                         break;
12470                 }
12471                 break;
12472         default:
12473                 tg3_flag_set(tp, NO_NVRAM);
12474                 return;
12475         }
12476
12477         tg3_nvram_get_pagesize(tp, nvcfg1);
12478         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12479                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12480 }
12481
12482
12483 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12484 {
12485         u32 nvcfg1;
12486
12487         nvcfg1 = tr32(NVRAM_CFG1);
12488
12489         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12490         case FLASH_5717VENDOR_ATMEL_EEPROM:
12491         case FLASH_5717VENDOR_MICRO_EEPROM:
12492                 tp->nvram_jedecnum = JEDEC_ATMEL;
12493                 tg3_flag_set(tp, NVRAM_BUFFERED);
12494                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12495
12496                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12497                 tw32(NVRAM_CFG1, nvcfg1);
12498                 return;
12499         case FLASH_5717VENDOR_ATMEL_MDB011D:
12500         case FLASH_5717VENDOR_ATMEL_ADB011B:
12501         case FLASH_5717VENDOR_ATMEL_ADB011D:
12502         case FLASH_5717VENDOR_ATMEL_MDB021D:
12503         case FLASH_5717VENDOR_ATMEL_ADB021B:
12504         case FLASH_5717VENDOR_ATMEL_ADB021D:
12505         case FLASH_5717VENDOR_ATMEL_45USPT:
12506                 tp->nvram_jedecnum = JEDEC_ATMEL;
12507                 tg3_flag_set(tp, NVRAM_BUFFERED);
12508                 tg3_flag_set(tp, FLASH);
12509
12510                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12511                 case FLASH_5717VENDOR_ATMEL_MDB021D:
12512                         /* Detect size with tg3_nvram_get_size() */
12513                         break;
12514                 case FLASH_5717VENDOR_ATMEL_ADB021B:
12515                 case FLASH_5717VENDOR_ATMEL_ADB021D:
12516                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12517                         break;
12518                 default:
12519                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12520                         break;
12521                 }
12522                 break;
12523         case FLASH_5717VENDOR_ST_M_M25PE10:
12524         case FLASH_5717VENDOR_ST_A_M25PE10:
12525         case FLASH_5717VENDOR_ST_M_M45PE10:
12526         case FLASH_5717VENDOR_ST_A_M45PE10:
12527         case FLASH_5717VENDOR_ST_M_M25PE20:
12528         case FLASH_5717VENDOR_ST_A_M25PE20:
12529         case FLASH_5717VENDOR_ST_M_M45PE20:
12530         case FLASH_5717VENDOR_ST_A_M45PE20:
12531         case FLASH_5717VENDOR_ST_25USPT:
12532         case FLASH_5717VENDOR_ST_45USPT:
12533                 tp->nvram_jedecnum = JEDEC_ST;
12534                 tg3_flag_set(tp, NVRAM_BUFFERED);
12535                 tg3_flag_set(tp, FLASH);
12536
12537                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12538                 case FLASH_5717VENDOR_ST_M_M25PE20:
12539                 case FLASH_5717VENDOR_ST_M_M45PE20:
12540                         /* Detect size with tg3_nvram_get_size() */
12541                         break;
12542                 case FLASH_5717VENDOR_ST_A_M25PE20:
12543                 case FLASH_5717VENDOR_ST_A_M45PE20:
12544                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12545                         break;
12546                 default:
12547                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12548                         break;
12549                 }
12550                 break;
12551         default:
12552                 tg3_flag_set(tp, NO_NVRAM);
12553                 return;
12554         }
12555
12556         tg3_nvram_get_pagesize(tp, nvcfg1);
12557         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12558                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12559 }
12560
12561 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12562 {
12563         u32 nvcfg1, nvmpinstrp;
12564
12565         nvcfg1 = tr32(NVRAM_CFG1);
12566         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12567
12568         switch (nvmpinstrp) {
12569         case FLASH_5720_EEPROM_HD:
12570         case FLASH_5720_EEPROM_LD:
12571                 tp->nvram_jedecnum = JEDEC_ATMEL;
12572                 tg3_flag_set(tp, NVRAM_BUFFERED);
12573
12574                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12575                 tw32(NVRAM_CFG1, nvcfg1);
12576                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12577                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12578                 else
12579                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12580                 return;
12581         case FLASH_5720VENDOR_M_ATMEL_DB011D:
12582         case FLASH_5720VENDOR_A_ATMEL_DB011B:
12583         case FLASH_5720VENDOR_A_ATMEL_DB011D:
12584         case FLASH_5720VENDOR_M_ATMEL_DB021D:
12585         case FLASH_5720VENDOR_A_ATMEL_DB021B:
12586         case FLASH_5720VENDOR_A_ATMEL_DB021D:
12587         case FLASH_5720VENDOR_M_ATMEL_DB041D:
12588         case FLASH_5720VENDOR_A_ATMEL_DB041B:
12589         case FLASH_5720VENDOR_A_ATMEL_DB041D:
12590         case FLASH_5720VENDOR_M_ATMEL_DB081D:
12591         case FLASH_5720VENDOR_A_ATMEL_DB081D:
12592         case FLASH_5720VENDOR_ATMEL_45USPT:
12593                 tp->nvram_jedecnum = JEDEC_ATMEL;
12594                 tg3_flag_set(tp, NVRAM_BUFFERED);
12595                 tg3_flag_set(tp, FLASH);
12596
12597                 switch (nvmpinstrp) {
12598                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12599                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12600                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12601                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12602                         break;
12603                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12604                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12605                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12606                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12607                         break;
12608                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12609                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12610                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12611                         break;
12612                 default:
12613                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12614                         break;
12615                 }
12616                 break;
12617         case FLASH_5720VENDOR_M_ST_M25PE10:
12618         case FLASH_5720VENDOR_M_ST_M45PE10:
12619         case FLASH_5720VENDOR_A_ST_M25PE10:
12620         case FLASH_5720VENDOR_A_ST_M45PE10:
12621         case FLASH_5720VENDOR_M_ST_M25PE20:
12622         case FLASH_5720VENDOR_M_ST_M45PE20:
12623         case FLASH_5720VENDOR_A_ST_M25PE20:
12624         case FLASH_5720VENDOR_A_ST_M45PE20:
12625         case FLASH_5720VENDOR_M_ST_M25PE40:
12626         case FLASH_5720VENDOR_M_ST_M45PE40:
12627         case FLASH_5720VENDOR_A_ST_M25PE40:
12628         case FLASH_5720VENDOR_A_ST_M45PE40:
12629         case FLASH_5720VENDOR_M_ST_M25PE80:
12630         case FLASH_5720VENDOR_M_ST_M45PE80:
12631         case FLASH_5720VENDOR_A_ST_M25PE80:
12632         case FLASH_5720VENDOR_A_ST_M45PE80:
12633         case FLASH_5720VENDOR_ST_25USPT:
12634         case FLASH_5720VENDOR_ST_45USPT:
12635                 tp->nvram_jedecnum = JEDEC_ST;
12636                 tg3_flag_set(tp, NVRAM_BUFFERED);
12637                 tg3_flag_set(tp, FLASH);
12638
12639                 switch (nvmpinstrp) {
12640                 case FLASH_5720VENDOR_M_ST_M25PE20:
12641                 case FLASH_5720VENDOR_M_ST_M45PE20:
12642                 case FLASH_5720VENDOR_A_ST_M25PE20:
12643                 case FLASH_5720VENDOR_A_ST_M45PE20:
12644                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12645                         break;
12646                 case FLASH_5720VENDOR_M_ST_M25PE40:
12647                 case FLASH_5720VENDOR_M_ST_M45PE40:
12648                 case FLASH_5720VENDOR_A_ST_M25PE40:
12649                 case FLASH_5720VENDOR_A_ST_M45PE40:
12650                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12651                         break;
12652                 case FLASH_5720VENDOR_M_ST_M25PE80:
12653                 case FLASH_5720VENDOR_M_ST_M45PE80:
12654                 case FLASH_5720VENDOR_A_ST_M25PE80:
12655                 case FLASH_5720VENDOR_A_ST_M45PE80:
12656                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12657                         break;
12658                 default:
12659                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12660                         break;
12661                 }
12662                 break;
12663         default:
12664                 tg3_flag_set(tp, NO_NVRAM);
12665                 return;
12666         }
12667
12668         tg3_nvram_get_pagesize(tp, nvcfg1);
12669         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12670                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12671 }
12672
12673 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12674 static void __devinit tg3_nvram_init(struct tg3 *tp)
12675 {
12676         tw32_f(GRC_EEPROM_ADDR,
12677              (EEPROM_ADDR_FSM_RESET |
12678               (EEPROM_DEFAULT_CLOCK_PERIOD <<
12679                EEPROM_ADDR_CLKPERD_SHIFT)));
12680
12681         msleep(1);
12682
12683         /* Enable seeprom accesses. */
12684         tw32_f(GRC_LOCAL_CTRL,
12685              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12686         udelay(100);
12687
12688         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12689             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12690                 tg3_flag_set(tp, NVRAM);
12691
12692                 if (tg3_nvram_lock(tp)) {
12693                         netdev_warn(tp->dev,
12694                                     "Cannot get nvram lock, %s failed\n",
12695                                     __func__);
12696                         return;
12697                 }
12698                 tg3_enable_nvram_access(tp);
12699
12700                 tp->nvram_size = 0;
12701
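                      /* Dispatch to the ASIC-specific NVRAM layout parser;
                       * chips not listed here fall through to
                       * tg3_get_nvram_info().
                       */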
12702                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12703                         tg3_get_5752_nvram_info(tp);
12704                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12705                         tg3_get_5755_nvram_info(tp);
12706                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12707                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12708                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12709                         tg3_get_5787_nvram_info(tp);
12710                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12711                         tg3_get_5761_nvram_info(tp);
12712                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12713                         tg3_get_5906_nvram_info(tp);
12714                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12715                          tg3_flag(tp, 57765_CLASS))
12716                         tg3_get_57780_nvram_info(tp);
12717                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12718                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12719                         tg3_get_5717_nvram_info(tp);
12720                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12721                         tg3_get_5720_nvram_info(tp);
12722                 else
12723                         tg3_get_nvram_info(tp);
12724
12725                 if (tp->nvram_size == 0)
12726                         tg3_get_nvram_size(tp);
12727
12728                 tg3_disable_nvram_access(tp);
12729                 tg3_nvram_unlock(tp);
12730
12731         } else {
12732                 tg3_flag_clear(tp, NVRAM);
12733                 tg3_flag_clear(tp, NVRAM_BUFFERED);
12734
12735                 tg3_get_eeprom_size(tp);
12736         }
12737 }
12738
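      /* Write a block to a legacy SEEPROM one 32-bit word at a time through
       * the GRC_EEPROM_DATA/GRC_EEPROM_ADDR registers, polling
       * EEPROM_ADDR_COMPLETE (for up to ~1 second) after each word.
       */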
12739 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12740                                     u32 offset, u32 len, u8 *buf)
12741 {
12742         int i, j, rc = 0;
12743         u32 val;
12744
12745         for (i = 0; i < len; i += 4) {
12746                 u32 addr;
12747                 __be32 data;
12748
12749                 addr = offset + i;
12750
12751                 memcpy(&data, buf + i, 4);
12752
12753                 /*
12754                  * The SEEPROM interface expects the data to always be opposite
12755                  * the native endian format.  We accomplish this by reversing
12756                  * all the operations that would have been performed on the
12757                  * data from a call to tg3_nvram_read_be32().
12758                  */
12759                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12760
12761                 val = tr32(GRC_EEPROM_ADDR);
12762                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12763
12764                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12765                         EEPROM_ADDR_READ);
12766                 tw32(GRC_EEPROM_ADDR, val |
12767                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
12768                         (addr & EEPROM_ADDR_ADDR_MASK) |
12769                         EEPROM_ADDR_START |
12770                         EEPROM_ADDR_WRITE);
12771
12772                 for (j = 0; j < 1000; j++) {
12773                         val = tr32(GRC_EEPROM_ADDR);
12774
12775                         if (val & EEPROM_ADDR_COMPLETE)
12776                                 break;
12777                         msleep(1);
12778                 }
12779                 if (!(val & EEPROM_ADDR_COMPLETE)) {
12780                         rc = -EBUSY;
12781                         break;
12782                 }
12783         }
12784
12785         return rc;
12786 }
12787
12788 /* offset and length are dword aligned */
12789 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12790                 u8 *buf)
12791 {
12792         int ret = 0;
12793         u32 pagesize = tp->nvram_pagesize;
12794         u32 pagemask = pagesize - 1;
12795         u32 nvram_cmd;
12796         u8 *tmp;
12797
12798         tmp = kmalloc(pagesize, GFP_KERNEL);
12799         if (tmp == NULL)
12800                 return -ENOMEM;
12801
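              /* Unbuffered flash must be rewritten a full page at a time:
               * read the page into tmp, merge in the caller's bytes, erase
               * the page, then stream it back word-by-word with FIRST/LAST
               * framing around each page.
               */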
12802         while (len) {
12803                 int j;
12804                 u32 phy_addr, page_off, size;
12805
12806                 phy_addr = offset & ~pagemask;
12807
12808                 for (j = 0; j < pagesize; j += 4) {
12809                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
12810                                                   (__be32 *) (tmp + j));
12811                         if (ret)
12812                                 break;
12813                 }
12814                 if (ret)
12815                         break;
12816
12817                 page_off = offset & pagemask;
12818                 size = pagesize;
12819                 if (len < size)
12820                         size = len;
12821
12822                 len -= size;
12823
12824                 memcpy(tmp + page_off, buf, size);
12825
12826                 offset = offset + (pagesize - page_off);
12827
12828                 tg3_enable_nvram_access(tp);
12829
12830                 /*
12831                  * Before we can erase the flash page, we need
12832                  * to issue a special "write enable" command.
12833                  */
12834                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12835
12836                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12837                         break;
12838
12839                 /* Erase the target page */
12840                 tw32(NVRAM_ADDR, phy_addr);
12841
12842                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12843                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12844
12845                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12846                         break;
12847
12848                 /* Issue another write enable to start the write. */
12849                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12850
12851                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12852                         break;
12853
12854                 for (j = 0; j < pagesize; j += 4) {
12855                         __be32 data;
12856
12857                         data = *((__be32 *) (tmp + j));
12858
12859                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
12860
12861                         tw32(NVRAM_ADDR, phy_addr + j);
12862
12863                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12864                                 NVRAM_CMD_WR;
12865
12866                         if (j == 0)
12867                                 nvram_cmd |= NVRAM_CMD_FIRST;
12868                         else if (j == (pagesize - 4))
12869                                 nvram_cmd |= NVRAM_CMD_LAST;
12870
12871                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12872                                 break;
12873                 }
12874                 if (ret)
12875                         break;
12876         }
12877
12878         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12879         tg3_nvram_exec_cmd(tp, nvram_cmd);
12880
12881         kfree(tmp);
12882
12883         return ret;
12884 }
12885
12886 /* offset and length are dword aligned */
12887 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12888                 u8 *buf)
12889 {
12890         int i, ret = 0;
12891
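              /* Buffered parts accept word-at-a-time writes; NVRAM_CMD_FIRST
               * marks the first word of a page (or of the transfer) and
               * NVRAM_CMD_LAST the last word of a page or of the whole
               * transfer.
               */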
12892         for (i = 0; i < len; i += 4, offset += 4) {
12893                 u32 page_off, phy_addr, nvram_cmd;
12894                 __be32 data;
12895
12896                 memcpy(&data, buf + i, 4);
12897                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12898
12899                 page_off = offset % tp->nvram_pagesize;
12900
12901                 phy_addr = tg3_nvram_phys_addr(tp, offset);
12902
12903                 tw32(NVRAM_ADDR, phy_addr);
12904
12905                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12906
12907                 if (page_off == 0 || i == 0)
12908                         nvram_cmd |= NVRAM_CMD_FIRST;
12909                 if (page_off == (tp->nvram_pagesize - 4))
12910                         nvram_cmd |= NVRAM_CMD_LAST;
12911
12912                 if (i == (len - 4))
12913                         nvram_cmd |= NVRAM_CMD_LAST;
12914
12915                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12916                     !tg3_flag(tp, 5755_PLUS) &&
12917                     (tp->nvram_jedecnum == JEDEC_ST) &&
12918                     (nvram_cmd & NVRAM_CMD_FIRST)) {
12919
12920                         if ((ret = tg3_nvram_exec_cmd(tp,
12921                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12922                                 NVRAM_CMD_DONE)))
12923
12924                                 break;
12925                 }
12926                 if (!tg3_flag(tp, FLASH)) {
12927                         /* We always do complete word writes to eeprom. */
12928                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12929                 }
12930
12931                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12932                         break;
12933         }
12934         return ret;
12935 }
12936
12937 /* offset and length are dword aligned */
12938 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12939 {
12940         int ret;
12941
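              /* Boards flagged EEPROM_WRITE_PROT gate writes with GPIO
               * OUTPUT1: deassert it for the duration of the write, then
               * restore the saved GRC_LOCAL_CTRL value afterwards.
               */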
12942         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12943                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12944                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
12945                 udelay(40);
12946         }
12947
12948         if (!tg3_flag(tp, NVRAM)) {
12949                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12950         } else {
12951                 u32 grc_mode;
12952
12953                 ret = tg3_nvram_lock(tp);
12954                 if (ret)
12955                         return ret;
12956
12957                 tg3_enable_nvram_access(tp);
12958                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12959                         tw32(NVRAM_WRITE1, 0x406);
12960
12961                 grc_mode = tr32(GRC_MODE);
12962                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12963
12964                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12965                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
12966                                 buf);
12967                 } else {
12968                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12969                                 buf);
12970                 }
12971
12972                 grc_mode = tr32(GRC_MODE);
12973                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12974
12975                 tg3_disable_nvram_access(tp);
12976                 tg3_nvram_unlock(tp);
12977         }
12978
12979         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12980                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12981                 udelay(40);
12982         }
12983
12984         return ret;
12985 }
12986
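      /* Fallback mapping from PCI subsystem IDs to PHY IDs, used by
       * tg3_phy_probe() when neither the MII registers nor the EEPROM
       * yield a usable PHY ID.
       */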
12987 struct subsys_tbl_ent {
12988         u16 subsys_vendor, subsys_devid;
12989         u32 phy_id;
12990 };
12991
12992 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12993         /* Broadcom boards. */
12994         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12995           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12996         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12997           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12998         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12999           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
13000         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13001           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
13002         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13003           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
13004         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13005           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
13006         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13007           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13008         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13009           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13010         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13011           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13012         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13013           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13014         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13015           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
13016
13017         /* 3com boards. */
13018         { TG3PCI_SUBVENDOR_ID_3COM,
13019           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13020         { TG3PCI_SUBVENDOR_ID_3COM,
13021           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13022         { TG3PCI_SUBVENDOR_ID_3COM,
13023           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13024         { TG3PCI_SUBVENDOR_ID_3COM,
13025           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13026         { TG3PCI_SUBVENDOR_ID_3COM,
13027           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13028
13029         /* DELL boards. */
13030         { TG3PCI_SUBVENDOR_ID_DELL,
13031           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13032         { TG3PCI_SUBVENDOR_ID_DELL,
13033           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13034         { TG3PCI_SUBVENDOR_ID_DELL,
13035           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13036         { TG3PCI_SUBVENDOR_ID_DELL,
13037           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13038
13039         /* Compaq boards. */
13040         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13041           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13042         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13043           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13044         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13045           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13046         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13047           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13048         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13049           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13050
13051         /* IBM boards. */
13052         { TG3PCI_SUBVENDOR_ID_IBM,
13053           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13054 };
13055
13056 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13057 {
13058         int i;
13059
13060         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13061                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13062                      tp->pdev->subsystem_vendor) &&
13063                     (subsys_id_to_phy_id[i].subsys_devid ==
13064                      tp->pdev->subsystem_device))
13065                         return &subsys_id_to_phy_id[i];
13066         }
13067         return NULL;
13068 }
13069
13070 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13071 {
13072         u32 val;
13073
13074         tp->phy_id = TG3_PHY_ID_INVALID;
13075         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13076
13077         /* Assume an onboard device and WOL capable by default.  */
13078         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13079         tg3_flag_set(tp, WOL_CAP);
13080
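              /* The 5906 keeps its WOL/ASPM configuration in the VCPU shadow
               * register rather than in the NIC SRAM config block parsed
               * below.
               */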
13081         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13082                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13083                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13084                         tg3_flag_set(tp, IS_NIC);
13085                 }
13086                 val = tr32(VCPU_CFGSHDW);
13087                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
13088                         tg3_flag_set(tp, ASPM_WORKAROUND);
13089                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13090                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13091                         tg3_flag_set(tp, WOL_ENABLE);
13092                         device_set_wakeup_enable(&tp->pdev->dev, true);
13093                 }
13094                 goto done;
13095         }
13096
13097         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13098         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13099                 u32 nic_cfg, led_cfg;
13100                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13101                 int eeprom_phy_serdes = 0;
13102
13103                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13104                 tp->nic_sram_data_cfg = nic_cfg;
13105
13106                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13107                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13108                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13109                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13110                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13111                     (ver > 0) && (ver < 0x100))
13112                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13113
13114                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13115                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13116
13117                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13118                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13119                         eeprom_phy_serdes = 1;
13120
13121                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13122                 if (nic_phy_id != 0) {
13123                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13124                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13125
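                              /* Pack the two SRAM ID words into tg3's
                               * internal phy_id format, mirroring the
                               * MII_PHYSID1/2 packing in tg3_phy_probe().
                               */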
13126                         eeprom_phy_id  = (id1 >> 16) << 10;
13127                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
13128                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
13129                 } else
13130                         eeprom_phy_id = 0;
13131
13132                 tp->phy_id = eeprom_phy_id;
13133                 if (eeprom_phy_serdes) {
13134                         if (!tg3_flag(tp, 5705_PLUS))
13135                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13136                         else
13137                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13138                 }
13139
13140                 if (tg3_flag(tp, 5750_PLUS))
13141                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13142                                     SHASTA_EXT_LED_MODE_MASK);
13143                 else
13144                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13145
13146                 switch (led_cfg) {
13147                 default:
13148                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13149                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13150                         break;
13151
13152                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13153                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13154                         break;
13155
13156                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13157                         tp->led_ctrl = LED_CTRL_MODE_MAC;
13158
13159                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
13160                          * returned by some older 5700/5701 bootcode.
13161                          */
13162                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13163                             ASIC_REV_5700 ||
13164                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
13165                             ASIC_REV_5701)
13166                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13167
13168                         break;
13169
13170                 case SHASTA_EXT_LED_SHARED:
13171                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
13172                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13173                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13174                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13175                                                  LED_CTRL_MODE_PHY_2);
13176                         break;
13177
13178                 case SHASTA_EXT_LED_MAC:
13179                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13180                         break;
13181
13182                 case SHASTA_EXT_LED_COMBO:
13183                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
13184                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13185                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13186                                                  LED_CTRL_MODE_PHY_2);
13187                         break;
13188
13189                 }
13190
13191                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13192                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13193                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13194                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13195
13196                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13197                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13198
13199                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13200                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13201                         if ((tp->pdev->subsystem_vendor ==
13202                              PCI_VENDOR_ID_ARIMA) &&
13203                             (tp->pdev->subsystem_device == 0x205a ||
13204                              tp->pdev->subsystem_device == 0x2063))
13205                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13206                 } else {
13207                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13208                         tg3_flag_set(tp, IS_NIC);
13209                 }
13210
13211                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13212                         tg3_flag_set(tp, ENABLE_ASF);
13213                         if (tg3_flag(tp, 5750_PLUS))
13214                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13215                 }
13216
13217                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13218                     tg3_flag(tp, 5750_PLUS))
13219                         tg3_flag_set(tp, ENABLE_APE);
13220
13221                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13222                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13223                         tg3_flag_clear(tp, WOL_CAP);
13224
13225                 if (tg3_flag(tp, WOL_CAP) &&
13226                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13227                         tg3_flag_set(tp, WOL_ENABLE);
13228                         device_set_wakeup_enable(&tp->pdev->dev, true);
13229                 }
13230
13231                 if (cfg2 & (1 << 17))
13232                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13233
13234                 /* SerDes signal pre-emphasis in register 0x590 is
13235                  * set by the bootcode if bit 18 is set. */
13236                 if (cfg2 & (1 << 18))
13237                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13238
13239                 if ((tg3_flag(tp, 57765_PLUS) ||
13240                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13241                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13242                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13243                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13244
13245                 if (tg3_flag(tp, PCI_EXPRESS) &&
13246                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13247                     !tg3_flag(tp, 57765_PLUS)) {
13248                         u32 cfg3;
13249
13250                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13251                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13252                                 tg3_flag_set(tp, ASPM_WORKAROUND);
13253                 }
13254
13255                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13256                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13257                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13258                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13259                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13260                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13261         }
13262 done:
13263         if (tg3_flag(tp, WOL_CAP))
13264                 device_set_wakeup_enable(&tp->pdev->dev,
13265                                          tg3_flag(tp, WOL_ENABLE));
13266         else
13267                 device_set_wakeup_capable(&tp->pdev->dev, false);
13268 }
13269
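      /* Kick a command into the OTP controller and poll OTP_STATUS until the
       * controller reports completion.
       */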
13270 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13271 {
13272         int i;
13273         u32 val;
13274
13275         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13276         tw32(OTP_CTRL, cmd);
13277
13278         /* Wait for up to 1 ms for command to execute. */
13279         for (i = 0; i < 100; i++) {
13280                 val = tr32(OTP_STATUS);
13281                 if (val & OTP_STATUS_CMD_DONE)
13282                         break;
13283                 udelay(10);
13284         }
13285
13286         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13287 }
13288
13289 /* Read the gphy configuration from the OTP region of the chip.  The gphy
13290  * configuration is a 32-bit value that straddles the alignment boundary.
13291  * We do two 32-bit reads and then shift and merge the results.
13292  */
13293 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13294 {
13295         u32 bhalf_otp, thalf_otp;
13296
13297         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13298
13299         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13300                 return 0;
13301
13302         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13303
13304         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13305                 return 0;
13306
13307         thalf_otp = tr32(OTP_READ_DATA);
13308
13309         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13310
13311         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13312                 return 0;
13313
13314         bhalf_otp = tr32(OTP_READ_DATA);
13315
13316         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13317 }
13318
13319 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13320 {
13321         u32 adv = ADVERTISED_Autoneg;
13322
13323         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13324                 adv |= ADVERTISED_1000baseT_Half |
13325                        ADVERTISED_1000baseT_Full;
13326
13327         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13328                 adv |= ADVERTISED_100baseT_Half |
13329                        ADVERTISED_100baseT_Full |
13330                        ADVERTISED_10baseT_Half |
13331                        ADVERTISED_10baseT_Full |
13332                        ADVERTISED_TP;
13333         else
13334                 adv |= ADVERTISED_FIBRE;
13335
13336         tp->link_config.advertising = adv;
13337         tp->link_config.speed = SPEED_INVALID;
13338         tp->link_config.duplex = DUPLEX_INVALID;
13339         tp->link_config.autoneg = AUTONEG_ENABLE;
13340         tp->link_config.active_speed = SPEED_INVALID;
13341         tp->link_config.active_duplex = DUPLEX_INVALID;
13342         tp->link_config.orig_speed = SPEED_INVALID;
13343         tp->link_config.orig_duplex = DUPLEX_INVALID;
13344         tp->link_config.orig_autoneg = AUTONEG_INVALID;
13345 }
13346
13347 static int __devinit tg3_phy_probe(struct tg3 *tp)
13348 {
13349         u32 hw_phy_id_1, hw_phy_id_2;
13350         u32 hw_phy_id, hw_phy_id_masked;
13351         int err;
13352
13353         /* flow control autonegotiation is default behavior */
13354         tg3_flag_set(tp, PAUSE_AUTONEG);
13355         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13356
13357         if (tg3_flag(tp, USE_PHYLIB))
13358                 return tg3_phy_init(tp);
13359
13360         /* Reading the PHY ID register can conflict with ASF
13361          * firmware access to the PHY hardware.
13362          */
13363         err = 0;
13364         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13365                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13366         } else {
13367                 /* Now read the physical PHY_ID from the chip and verify
13368                  * that it is sane.  If it doesn't look good, we fall back
13369                  * to the PHY ID found in the eeprom area and, failing
13370                  * that, the hard-coded subsystem-ID table.
13371                  */
13372                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13373                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13374
13375                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
13376                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13377                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
13378
13379                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13380         }
13381
13382         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13383                 tp->phy_id = hw_phy_id;
13384                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13385                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13386                 else
13387                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13388         } else {
13389                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13390                         /* Do nothing, phy ID already set up in
13391                          * tg3_get_eeprom_hw_cfg().
13392                          */
13393                 } else {
13394                         struct subsys_tbl_ent *p;
13395
13396                         /* No eeprom signature?  Try the hardcoded
13397                          * subsys device table.
13398                          */
13399                         p = tg3_lookup_by_subsys(tp);
13400                         if (!p)
13401                                 return -ENODEV;
13402
13403                         tp->phy_id = p->phy_id;
13404                         if (!tp->phy_id ||
13405                             tp->phy_id == TG3_PHY_ID_BCM8002)
13406                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13407                 }
13408         }
13409
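              /* Copper ports on the 5719/5720, on the 5718 (except 5717-A0
               * silicon) and on non-A0 57765 parts support Energy Efficient
               * Ethernet (EEE).
               */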
13410         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13411             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13412              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13413              (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13414               tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13415              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13416               tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13417                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13418
13419         tg3_phy_init_link_config(tp);
13420
13421         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13422             !tg3_flag(tp, ENABLE_APE) &&
13423             !tg3_flag(tp, ENABLE_ASF)) {
13424                 u32 bmsr, dummy;
13425
13426                 tg3_readphy(tp, MII_BMSR, &bmsr);
13427                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13428                     (bmsr & BMSR_LSTATUS))
13429                         goto skip_phy_reset;
13430
13431                 err = tg3_phy_reset(tp);
13432                 if (err)
13433                         return err;
13434
13435                 tg3_phy_set_wirespeed(tp);
13436
13437                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
13438                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13439                                             tp->link_config.flowctrl);
13440
13441                         tg3_writephy(tp, MII_BMCR,
13442                                      BMCR_ANENABLE | BMCR_ANRESTART);
13443                 }
13444         }
13445
13446 skip_phy_reset:
13447         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13448                 err = tg3_init_5401phy_dsp(tp);
13449                 if (err)
13450                         return err;
13451
13452                 err = tg3_init_5401phy_dsp(tp);
13453         }
13454
13455         return err;
13456 }
13457
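      /* Pull the board part number, and on Dell boards (PCI vendor 0x1028)
       * the bootcode version string, out of the device's VPD read-only
       * section; if no VPD is available, fall back to a name keyed off the
       * PCI device ID.
       */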
13458 static void __devinit tg3_read_vpd(struct tg3 *tp)
13459 {
13460         u8 *vpd_data;
13461         unsigned int block_end, rosize, len;
13462         u32 vpdlen;
13463         int j, i = 0;
13464
13465         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13466         if (!vpd_data)
13467                 goto out_no_vpd;
13468
13469         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13470         if (i < 0)
13471                 goto out_not_found;
13472
13473         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13474         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13475         i += PCI_VPD_LRDT_TAG_SIZE;
13476
13477         if (block_end > vpdlen)
13478                 goto out_not_found;
13479
13480         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13481                                       PCI_VPD_RO_KEYWORD_MFR_ID);
13482         if (j > 0) {
13483                 len = pci_vpd_info_field_size(&vpd_data[j]);
13484
13485                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13486                 if (j + len > block_end || len != 4 ||
13487                     memcmp(&vpd_data[j], "1028", 4))
13488                         goto partno;
13489
13490                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13491                                               PCI_VPD_RO_KEYWORD_VENDOR0);
13492                 if (j < 0)
13493                         goto partno;
13494
13495                 len = pci_vpd_info_field_size(&vpd_data[j]);
13496
13497                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13498                 if (j + len > block_end)
13499                         goto partno;
13500
13501                 memcpy(tp->fw_ver, &vpd_data[j], len);
13502                 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
13503         }
13504
13505 partno:
13506         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13507                                       PCI_VPD_RO_KEYWORD_PARTNO);
13508         if (i < 0)
13509                 goto out_not_found;
13510
13511         len = pci_vpd_info_field_size(&vpd_data[i]);
13512
13513         i += PCI_VPD_INFO_FLD_HDR_SIZE;
13514         if (len > TG3_BPN_SIZE ||
13515             (len + i) > vpdlen)
13516                 goto out_not_found;
13517
13518         memcpy(tp->board_part_number, &vpd_data[i], len);
13519
13520 out_not_found:
13521         kfree(vpd_data);
13522         if (tp->board_part_number[0])
13523                 return;
13524
13525 out_no_vpd:
13526         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13527                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13528                         strcpy(tp->board_part_number, "BCM5717");
13529                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13530                         strcpy(tp->board_part_number, "BCM5718");
13531                 else
13532                         goto nomatch;
13533         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13534                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13535                         strcpy(tp->board_part_number, "BCM57780");
13536                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13537                         strcpy(tp->board_part_number, "BCM57760");
13538                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13539                         strcpy(tp->board_part_number, "BCM57790");
13540                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13541                         strcpy(tp->board_part_number, "BCM57788");
13542                 else
13543                         goto nomatch;
13544         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13545                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13546                         strcpy(tp->board_part_number, "BCM57761");
13547                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13548                         strcpy(tp->board_part_number, "BCM57765");
13549                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13550                         strcpy(tp->board_part_number, "BCM57781");
13551                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13552                         strcpy(tp->board_part_number, "BCM57785");
13553                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13554                         strcpy(tp->board_part_number, "BCM57791");
13555                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13556                         strcpy(tp->board_part_number, "BCM57795");
13557                 else
13558                         goto nomatch;
13559         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
13560                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
13561                         strcpy(tp->board_part_number, "BCM57762");
13562                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
13563                         strcpy(tp->board_part_number, "BCM57766");
13564                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
13565                         strcpy(tp->board_part_number, "BCM57782");
13566                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13567                         strcpy(tp->board_part_number, "BCM57786");
13568                 else
13569                         goto nomatch;
13570         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13571                 strcpy(tp->board_part_number, "BCM95906");
13572         } else {
13573 nomatch:
13574                 strcpy(tp->board_part_number, "none");
13575         }
13576 }
13577
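      /* A firmware image in NVRAM is considered valid if the top bits of its
       * first word carry the 0x0c000000 signature and its second word is
       * zero.
       */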
13578 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13579 {
13580         u32 val;
13581
13582         if (tg3_nvram_read(tp, offset, &val) ||
13583             (val & 0xfc000000) != 0x0c000000 ||
13584             tg3_nvram_read(tp, offset + 4, &val) ||
13585             val != 0)
13586                 return 0;
13587
13588         return 1;
13589 }
13590
13591 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13592 {
13593         u32 val, offset, start, ver_offset;
13594         int i, dst_off;
13595         bool newver = false;
13596
13597         if (tg3_nvram_read(tp, 0xc, &offset) ||
13598             tg3_nvram_read(tp, 0x4, &start))
13599                 return;
13600
13601         offset = tg3_nvram_logical_addr(tp, offset);
13602
13603         if (tg3_nvram_read(tp, offset, &val))
13604                 return;
13605
13606         if ((val & 0xfc000000) == 0x0c000000) {
13607                 if (tg3_nvram_read(tp, offset + 4, &val))
13608                         return;
13609
13610                 if (val == 0)
13611                         newver = true;
13612         }
13613
13614         dst_off = strlen(tp->fw_ver);
13615
13616         if (newver) {
13617                 if (TG3_VER_SIZE - dst_off < 16 ||
13618                     tg3_nvram_read(tp, offset + 8, &ver_offset))
13619                         return;
13620
13621                 offset = offset + ver_offset - start;
13622                 for (i = 0; i < 16; i += 4) {
13623                         __be32 v;
13624                         if (tg3_nvram_read_be32(tp, offset + i, &v))
13625                                 return;
13626
13627                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13628                 }
13629         } else {
13630                 u32 major, minor;
13631
13632                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13633                         return;
13634
13635                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13636                         TG3_NVM_BCVER_MAJSFT;
13637                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13638                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13639                          "v%d.%02d", major, minor);
13640         }
13641 }
13642
13643 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13644 {
13645         u32 val, major, minor;
13646
13647         /* Use native endian representation */
13648         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13649                 return;
13650
13651         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13652                 TG3_NVM_HWSB_CFG1_MAJSFT;
13653         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13654                 TG3_NVM_HWSB_CFG1_MINSFT;
13655
13656         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13657 }
13658
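      /* Decode the self-boot ("sb") version from a format-1 EEPROM image;
       * the major/minor/build fields live at a revision-dependent offset.
       */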
13659 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13660 {
13661         u32 offset, major, minor, build;
13662
13663         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13664
13665         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13666                 return;
13667
13668         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13669         case TG3_EEPROM_SB_REVISION_0:
13670                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13671                 break;
13672         case TG3_EEPROM_SB_REVISION_2:
13673                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13674                 break;
13675         case TG3_EEPROM_SB_REVISION_3:
13676                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13677                 break;
13678         case TG3_EEPROM_SB_REVISION_4:
13679                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13680                 break;
13681         case TG3_EEPROM_SB_REVISION_5:
13682                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13683                 break;
13684         case TG3_EEPROM_SB_REVISION_6:
13685                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13686                 break;
13687         default:
13688                 return;
13689         }
13690
13691         if (tg3_nvram_read(tp, offset, &val))
13692                 return;
13693
13694         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13695                 TG3_EEPROM_SB_EDH_BLD_SHFT;
13696         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13697                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13698         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
13699
13700         if (minor > 99 || build > 26)
13701                 return;
13702
13703         offset = strlen(tp->fw_ver);
13704         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13705                  " v%d.%02d", major, minor);
13706
13707         if (build > 0) {
13708                 offset = strlen(tp->fw_ver);
13709                 if (offset < TG3_VER_SIZE - 1)
13710                         tp->fw_ver[offset] = 'a' + build - 1;
13711         }
13712 }
13713
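      /* Locate the ASF management firmware image via the NVRAM directory
       * and append its version string to tp->fw_ver.
       */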
13714 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13715 {
13716         u32 val, offset, start;
13717         int i, vlen;
13718
13719         for (offset = TG3_NVM_DIR_START;
13720              offset < TG3_NVM_DIR_END;
13721              offset += TG3_NVM_DIRENT_SIZE) {
13722                 if (tg3_nvram_read(tp, offset, &val))
13723                         return;
13724
13725                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13726                         break;
13727         }
13728
13729         if (offset == TG3_NVM_DIR_END)
13730                 return;
13731
13732         if (!tg3_flag(tp, 5705_PLUS))
13733                 start = 0x08000000;
13734         else if (tg3_nvram_read(tp, offset - 4, &start))
13735                 return;
13736
13737         if (tg3_nvram_read(tp, offset + 4, &offset) ||
13738             !tg3_fw_img_is_valid(tp, offset) ||
13739             tg3_nvram_read(tp, offset + 8, &val))
13740                 return;
13741
13742         offset += val - start;
13743
13744         vlen = strlen(tp->fw_ver);
13745
13746         tp->fw_ver[vlen++] = ',';
13747         tp->fw_ver[vlen++] = ' ';
13748
13749         for (i = 0; i < 4; i++) {
13750                 __be32 v;
13751                 if (tg3_nvram_read_be32(tp, offset, &v))
13752                         return;
13753
13754                 offset += sizeof(v);
13755
13756                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13757                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13758                         break;
13759                 }
13760
13761                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13762                 vlen += sizeof(v);
13763         }
13764 }
13765
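      /* Report the APE management firmware version (DASH or NCSI flavor),
       * but only once the APE signature checks out and its firmware reports
       * ready.
       */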
13766 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13767 {
13768         int vlen;
13769         u32 apedata;
13770         char *fwtype;
13771
13772         if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13773                 return;
13774
13775         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13776         if (apedata != APE_SEG_SIG_MAGIC)
13777                 return;
13778
13779         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13780         if (!(apedata & APE_FW_STATUS_READY))
13781                 return;
13782
13783         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13784
13785         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13786                 tg3_flag_set(tp, APE_HAS_NCSI);
13787                 fwtype = "NCSI";
13788         } else {
13789                 fwtype = "DASH";
13790         }
13791
13792         vlen = strlen(tp->fw_ver);
13793
13794         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13795                  fwtype,
13796                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13797                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13798                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13799                  (apedata & APE_FW_VERSION_BLDMSK));
13800 }
13801
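      /* Assemble tp->fw_ver: any VPD-supplied version comes first, then the
       * bootcode/self-boot version chosen by the NVRAM signature, then the
       * ASF/APE management firmware version when no VPD version was found.
       */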
13802 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13803 {
13804         u32 val;
13805         bool vpd_vers = false;
13806
13807         if (tp->fw_ver[0] != 0)
13808                 vpd_vers = true;
13809
13810         if (tg3_flag(tp, NO_NVRAM)) {
13811                 strcat(tp->fw_ver, "sb");
13812                 return;
13813         }
13814
13815         if (tg3_nvram_read(tp, 0, &val))
13816                 return;
13817
13818         if (val == TG3_EEPROM_MAGIC)
13819                 tg3_read_bc_ver(tp);
13820         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13821                 tg3_read_sb_ver(tp, val);
13822         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13823                 tg3_read_hwsb_ver(tp);
13824         else
13825                 return;
13826
13827         if (vpd_vers)
13828                 goto done;
13829
13830         if (tg3_flag(tp, ENABLE_APE)) {
13831                 if (tg3_flag(tp, ENABLE_ASF))
13832                         tg3_read_dash_ver(tp);
13833         } else if (tg3_flag(tp, ENABLE_ASF)) {
13834                 tg3_read_mgmtfw_ver(tp);
13835         }
13836
13837 done:
13838         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13839 }
13840
13841 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13842
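      /* Pick the RX return ring size appropriate for this chip family. */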
13843 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13844 {
13845         if (tg3_flag(tp, LRG_PROD_RING_CAP))
13846                 return TG3_RX_RET_MAX_SIZE_5717;
13847         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13848                 return TG3_RX_RET_MAX_SIZE_5700;
13849         else
13850                 return TG3_RX_RET_MAX_SIZE_5705;
13851 }
13852
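      /* Host bridges known to reorder writes to the mailbox registers;
       * see the MBOX_WRITE_REORDER handling in tg3_get_invariants().
       */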
13853 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13854         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13855         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13856         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13857         { },
13858 };
13859
13860 static int __devinit tg3_get_invariants(struct tg3 *tp)
13861 {
13862         u32 misc_ctrl_reg;
13863         u32 pci_state_reg, grc_misc_cfg;
13864         u32 val;
13865         u16 pci_cmd;
13866         int err;
13867
13868         /* Force memory write invalidate off.  If we leave it on,
13869          * then on 5700_BX chips we have to enable a workaround.
13870          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13871          * to match the cacheline size.  The Broadcom driver has this
13872          * workaround too, but turns MWI off all the time and so never
13873          * uses it.  This seems to suggest that the workaround is insufficient.
13874          */
13875         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13876         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13877         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13878
13879         /* Important! -- Make sure register accesses are byteswapped
13880          * correctly.  Also, for those chips that require it, make
13881          * sure that indirect register accesses are enabled before
13882          * the first operation.
13883          */
13884         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13885                               &misc_ctrl_reg);
13886         tp->misc_host_ctrl |= (misc_ctrl_reg &
13887                                MISC_HOST_CTRL_CHIPREV);
13888         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13889                                tp->misc_host_ctrl);
13890
13891         tp->pci_chip_rev_id = (misc_ctrl_reg >>
13892                                MISC_HOST_CTRL_CHIPREV_SHIFT);
13893         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13894                 u32 prod_id_asic_rev;
13895
13896                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13897                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13898                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13899                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13900                         pci_read_config_dword(tp->pdev,
13901                                               TG3PCI_GEN2_PRODID_ASICREV,
13902                                               &prod_id_asic_rev);
13903                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13904                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13905                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13906                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13907                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13908                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
13909                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
13910                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
13911                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
13912                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13913                         pci_read_config_dword(tp->pdev,
13914                                               TG3PCI_GEN15_PRODID_ASICREV,
13915                                               &prod_id_asic_rev);
13916                 else
13917                         pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13918                                               &prod_id_asic_rev);
13919
13920                 tp->pci_chip_rev_id = prod_id_asic_rev;
13921         }
13922
13923         /* Wrong chip ID in 5752 A0. This code can be removed later
13924          * as A0 is not in production.
13925          */
13926         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13927                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13928
13929         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13930          * we need to disable memory and use config cycles
13931          * only to access all registers. The 5702/03 chips
13932          * can mistakenly decode the special cycles from the
13933          * ICH chipsets as memory write cycles, causing corruption
13934          * of register and memory space. Only certain ICH bridges
13935          * will drive special cycles with non-zero data during the
13936          * address phase which can fall within the 5703's address
13937          * range. This is not an ICH bug as the PCI spec allows
13938          * non-zero address during special cycles. However, only
13939          * these ICH bridges are known to drive non-zero addresses
13940          * during special cycles.
13941          *
13942          * Since special cycles do not cross PCI bridges, we only
13943          * enable this workaround if the 5703 is on the secondary
13944          * bus of these ICH bridges.
13945          */
13946         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13947             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13948                 static struct tg3_dev_id {
13949                         u32     vendor;
13950                         u32     device;
13951                         u32     rev;
13952                 } ich_chipsets[] = {
13953                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13954                           PCI_ANY_ID },
13955                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13956                           PCI_ANY_ID },
13957                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13958                           0xa },
13959                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13960                           PCI_ANY_ID },
13961                         { },
13962                 };
13963                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13964                 struct pci_dev *bridge = NULL;
13965
13966                 while (pci_id->vendor != 0) {
13967                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
13968                                                 bridge);
13969                         if (!bridge) {
13970                                 pci_id++;
13971                                 continue;
13972                         }
13973                         if (pci_id->rev != PCI_ANY_ID) {
13974                                 if (bridge->revision > pci_id->rev)
13975                                         continue;
13976                         }
13977                         if (bridge->subordinate &&
13978                             (bridge->subordinate->number ==
13979                              tp->pdev->bus->number)) {
13980                                 tg3_flag_set(tp, ICH_WORKAROUND);
13981                                 pci_dev_put(bridge);
13982                                 break;
13983                         }
13984                 }
13985         }
13986
13987         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13988                 static struct tg3_dev_id {
13989                         u32     vendor;
13990                         u32     device;
13991                 } bridge_chipsets[] = {
13992                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13993                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13994                         { },
13995                 };
13996                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13997                 struct pci_dev *bridge = NULL;
13998
13999                 while (pci_id->vendor != 0) {
14000                         bridge = pci_get_device(pci_id->vendor,
14001                                                 pci_id->device,
14002                                                 bridge);
14003                         if (!bridge) {
14004                                 pci_id++;
14005                                 continue;
14006                         }
14007                         if (bridge->subordinate &&
14008                             (bridge->subordinate->number <=
14009                              tp->pdev->bus->number) &&
14010                             (bridge->subordinate->subordinate >=
14011                              tp->pdev->bus->number)) {
14012                                 tg3_flag_set(tp, 5701_DMA_BUG);
14013                                 pci_dev_put(bridge);
14014                                 break;
14015                         }
14016                 }
14017         }
14018
14019         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
14020          * DMA addresses > 40-bit. This bridge may have other additional
14021          * DMA addresses > 40-bit. This bridge may have additional
14022          * 57xx devices behind it in some 4-port NIC designs, for example.
14023          * DMA workaround.
14024          */
14025         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14026             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14027                 tg3_flag_set(tp, 5780_CLASS);
14028                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14029                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
14030         } else {
14031                 struct pci_dev *bridge = NULL;
14032
14033                 do {
14034                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14035                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
14036                                                 bridge);
14037                         if (bridge && bridge->subordinate &&
14038                             (bridge->subordinate->number <=
14039                              tp->pdev->bus->number) &&
14040                             (bridge->subordinate->subordinate >=
14041                              tp->pdev->bus->number)) {
14042                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14043                                 pci_dev_put(bridge);
14044                                 break;
14045                         }
14046                 } while (bridge);
14047         }
14048
14049         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14050             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14051                 tp->pdev_peer = tg3_find_peer(tp);
14052
14053         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14054             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14055             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14056                 tg3_flag_set(tp, 5717_PLUS);
14057
14058         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
14059             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14060                 tg3_flag_set(tp, 57765_CLASS);
14061
14062         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
14063                 tg3_flag_set(tp, 57765_PLUS);
14064
14065         /* Intentionally exclude ASIC_REV_5906 */
14066         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14067             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14068             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14069             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14070             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14071             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14072             tg3_flag(tp, 57765_PLUS))
14073                 tg3_flag_set(tp, 5755_PLUS);
14074
14075         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14076             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14077             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14078             tg3_flag(tp, 5755_PLUS) ||
14079             tg3_flag(tp, 5780_CLASS))
14080                 tg3_flag_set(tp, 5750_PLUS);
14081
14082         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14083             tg3_flag(tp, 5750_PLUS))
14084                 tg3_flag_set(tp, 5705_PLUS);
14085
14086         /* Determine TSO capabilities */
14087         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
14088                 ; /* Do nothing. HW bug. */
14089         else if (tg3_flag(tp, 57765_PLUS))
14090                 tg3_flag_set(tp, HW_TSO_3);
14091         else if (tg3_flag(tp, 5755_PLUS) ||
14092                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14093                 tg3_flag_set(tp, HW_TSO_2);
14094         else if (tg3_flag(tp, 5750_PLUS)) {
14095                 tg3_flag_set(tp, HW_TSO_1);
14096                 tg3_flag_set(tp, TSO_BUG);
14097                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14098                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
14099                         tg3_flag_clear(tp, TSO_BUG);
14100         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14101                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14102                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
14103                 tg3_flag_set(tp, TSO_BUG);
14104                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14105                         tp->fw_needed = FIRMWARE_TG3TSO5;
14106                 else
14107                         tp->fw_needed = FIRMWARE_TG3TSO;
14108         }
14109
14110         /* Selectively allow TSO based on operating conditions */
14111         if (tg3_flag(tp, HW_TSO_1) ||
14112             tg3_flag(tp, HW_TSO_2) ||
14113             tg3_flag(tp, HW_TSO_3) ||
14114             tp->fw_needed) {
14115                 /* For firmware TSO, assume ASF is disabled.
14116                  * We'll disable TSO later if we discover ASF
14117                  * is enabled in tg3_get_eeprom_hw_cfg().
14118                  */
14119                 tg3_flag_set(tp, TSO_CAPABLE);
14120         } else {
14121                 tg3_flag_clear(tp, TSO_CAPABLE);
14122                 tg3_flag_clear(tp, TSO_BUG);
14123                 tp->fw_needed = NULL;
14124         }
14125
14126         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14127                 tp->fw_needed = FIRMWARE_TG3;
14128
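              /* Default to a single interrupt vector; the MSI/MSI-X
               * checks below may raise this for multi-vector chips.
               */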
14129         tp->irq_max = 1;
14130
14131         if (tg3_flag(tp, 5750_PLUS)) {
14132                 tg3_flag_set(tp, SUPPORT_MSI);
14133                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14134                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14135                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14136                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14137                      tp->pdev_peer == tp->pdev))
14138                         tg3_flag_clear(tp, SUPPORT_MSI);
14139
14140                 if (tg3_flag(tp, 5755_PLUS) ||
14141                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14142                         tg3_flag_set(tp, 1SHOT_MSI);
14143                 }
14144
14145                 if (tg3_flag(tp, 57765_PLUS)) {
14146                         tg3_flag_set(tp, SUPPORT_MSIX);
14147                         tp->irq_max = TG3_IRQ_MAX_VECS;
14148                         tg3_rss_init_dflt_indir_tbl(tp);
14149                 }
14150         }
14151
14152         if (tg3_flag(tp, 5755_PLUS))
14153                 tg3_flag_set(tp, SHORT_DMA_BUG);
14154
14155         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14156                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
14157         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14158                 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
14159
14160         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14161             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14162             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14163                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14164
14165         if (tg3_flag(tp, 57765_PLUS) &&
14166             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14167                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14168
14169         if (!tg3_flag(tp, 5705_PLUS) ||
14170             tg3_flag(tp, 5780_CLASS) ||
14171             tg3_flag(tp, USE_JUMBO_BDFLAG))
14172                 tg3_flag_set(tp, JUMBO_CAPABLE);
14173
14174         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14175                               &pci_state_reg);
14176
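              /* Classify the bus interface (PCIe, PCI-X, or conventional
               * PCI) and apply the matching quirks.
               */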
14177         if (pci_is_pcie(tp->pdev)) {
14178                 u16 lnkctl;
14179
14180                 tg3_flag_set(tp, PCI_EXPRESS);
14181
14182                 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
14183                         int readrq = pcie_get_readrq(tp->pdev);
14184                         if (readrq > 2048)
14185                                 pcie_set_readrq(tp->pdev, 2048);
14186                 }
14187
14188                 pci_read_config_word(tp->pdev,
14189                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14190                                      &lnkctl);
14191                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14192                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14193                             ASIC_REV_5906) {
14194                                 tg3_flag_clear(tp, HW_TSO_2);
14195                                 tg3_flag_clear(tp, TSO_CAPABLE);
14196                         }
14197                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14198                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14199                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14200                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14201                                 tg3_flag_set(tp, CLKREQ_BUG);
14202                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14203                         tg3_flag_set(tp, L1PLLPD_EN);
14204                 }
14205         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14206                 /* BCM5785 devices are effectively PCIe devices, and should
14207                  * follow PCIe codepaths, but do not have a PCIe capabilities
14208                  * section.
14209                  */
14210                 tg3_flag_set(tp, PCI_EXPRESS);
14211         } else if (!tg3_flag(tp, 5705_PLUS) ||
14212                    tg3_flag(tp, 5780_CLASS)) {
14213                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14214                 if (!tp->pcix_cap) {
14215                         dev_err(&tp->pdev->dev,
14216                                 "Cannot find PCI-X capability, aborting\n");
14217                         return -EIO;
14218                 }
14219
14220                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14221                         tg3_flag_set(tp, PCIX_MODE);
14222         }
14223
14224         /* If we have an AMD 762 or VIA K8T800 chipset, write
14225          * reordering to the mailbox registers done by the host
14226          * controller can cause major troubles.  We read back from
14227          * every mailbox register write to force the writes to be
14228          * posted to the chip in order.
14229          */
14230         if (pci_dev_present(tg3_write_reorder_chipsets) &&
14231             !tg3_flag(tp, PCI_EXPRESS))
14232                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14233
14234         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14235                              &tp->pci_cacheline_sz);
14236         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14237                              &tp->pci_lat_timer);
14238         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14239             tp->pci_lat_timer < 64) {
14240                 tp->pci_lat_timer = 64;
14241                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14242                                       tp->pci_lat_timer);
14243         }
14244
14245         /* Important! -- It is critical that the PCI-X hw workaround
14246          * situation is decided before the first MMIO register access.
14247          */
14248         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14249                 /* 5700 BX chips need to have their TX producer index
14250                  * mailboxes written twice to workaround a bug.
14251                  */
14252                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14253
14254                 /* If we are in PCI-X mode, enable register write workaround.
14255                  *
14256                  * The workaround is to use indirect register accesses
14257                  * for all chip writes not to mailbox registers.
14258                  * for all chip writes except those to mailbox registers.
14259                 if (tg3_flag(tp, PCIX_MODE)) {
14260                         u32 pm_reg;
14261
14262                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14263
14264                         /* The chip can have its power management PCI config
14265                          * space registers clobbered due to this bug.
14266                          * So explicitly force the chip into D0 here.
14267                          */
14268                         pci_read_config_dword(tp->pdev,
14269                                               tp->pm_cap + PCI_PM_CTRL,
14270                                               &pm_reg);
14271                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14272                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14273                         pci_write_config_dword(tp->pdev,
14274                                                tp->pm_cap + PCI_PM_CTRL,
14275                                                pm_reg);
14276
14277                         /* Also, force SERR#/PERR# in PCI command. */
14278                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14279                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14280                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14281                 }
14282         }
14283
14284         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14285                 tg3_flag_set(tp, PCI_HIGH_SPEED);
14286         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14287                 tg3_flag_set(tp, PCI_32BIT);
14288
14289         /* Chip-specific fixup from Broadcom driver */
14290         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14291             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14292                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14293                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14294         }
14295
14296         /* Default fast path register access methods */
14297         tp->read32 = tg3_read32;
14298         tp->write32 = tg3_write32;
14299         tp->read32_mbox = tg3_read32;
14300         tp->write32_mbox = tg3_write32;
14301         tp->write32_tx_mbox = tg3_write32;
14302         tp->write32_rx_mbox = tg3_write32;
14303
14304         /* Various workaround register access methods */
14305         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14306                 tp->write32 = tg3_write_indirect_reg32;
14307         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14308                  (tg3_flag(tp, PCI_EXPRESS) &&
14309                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14310                 /*
14311                  * Back-to-back register writes can cause problems on these
14312                  * chips; the workaround is to read back all reg writes
14313                  * except those to mailbox regs.
14314                  *
14315                  * See tg3_write_flush_reg32().
14316                  */
14317                 tp->write32 = tg3_write_flush_reg32;
14318         }
14319
14320         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14321                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14322                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14323                         tp->write32_rx_mbox = tg3_write_flush_reg32;
14324         }
14325
14326         if (tg3_flag(tp, ICH_WORKAROUND)) {
14327                 tp->read32 = tg3_read_indirect_reg32;
14328                 tp->write32 = tg3_write_indirect_reg32;
14329                 tp->read32_mbox = tg3_read_indirect_mbox;
14330                 tp->write32_mbox = tg3_write_indirect_mbox;
14331                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14332                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14333
14334                 iounmap(tp->regs);
14335                 tp->regs = NULL;
14336
14337                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14338                 pci_cmd &= ~PCI_COMMAND_MEMORY;
14339                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14340         }
14341         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14342                 tp->read32_mbox = tg3_read32_mbox_5906;
14343                 tp->write32_mbox = tg3_write32_mbox_5906;
14344                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14345                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14346         }
14347
14348         if (tp->write32 == tg3_write_indirect_reg32 ||
14349             (tg3_flag(tp, PCIX_MODE) &&
14350              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14351               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14352                 tg3_flag_set(tp, SRAM_USE_CONFIG);
14353
14354         /* The memory arbiter has to be enabled in order for SRAM accesses
14355          * to succeed.  Normally on powerup the tg3 chip firmware will make
14356          * sure it is enabled, but other entities such as system netboot
14357          * code might disable it.
14358          */
14359         val = tr32(MEMARB_MODE);
14360         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14361
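              /* Work out which port of a multi-port device this is.  Some
               * chips report the function number via PCI-X or CPMU status
               * rather than via the PCI devfn.
               */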
14362         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14363         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14364             tg3_flag(tp, 5780_CLASS)) {
14365                 if (tg3_flag(tp, PCIX_MODE)) {
14366                         pci_read_config_dword(tp->pdev,
14367                                               tp->pcix_cap + PCI_X_STATUS,
14368                                               &val);
14369                         tp->pci_fn = val & 0x7;
14370                 }
14371         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14372                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14373                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14374                     NIC_SRAM_CPMUSTAT_SIG) {
14375                         tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14376                         tp->pci_fn = tp->pci_fn ? 1 : 0;
14377                 }
14378         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14379                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14380                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14381                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14382                     NIC_SRAM_CPMUSTAT_SIG) {
14383                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14384                                      TG3_CPMU_STATUS_FSHFT_5719;
14385                 }
14386         }
14387
14388         /* Get eeprom hw config before calling tg3_set_power_state().
14389          * In particular, the TG3_FLAG_IS_NIC flag must be
14390          * determined before calling tg3_set_power_state() so that
14391          * we know whether or not to switch out of Vaux power.
14392          * When the flag is set, it means that GPIO1 is used for eeprom
14393          * write protect and also implies that it is a LOM where GPIOs
14394          * are not used to switch power.
14395          */
14396         tg3_get_eeprom_hw_cfg(tp);
14397
14398         if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14399                 tg3_flag_clear(tp, TSO_CAPABLE);
14400                 tg3_flag_clear(tp, TSO_BUG);
14401                 tp->fw_needed = NULL;
14402         }
14403
14404         if (tg3_flag(tp, ENABLE_APE)) {
14405                 /* Allow reads and writes to the
14406                  * APE register and memory space.
14407                  */
14408                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14409                                  PCISTATE_ALLOW_APE_SHMEM_WR |
14410                                  PCISTATE_ALLOW_APE_PSPACE_WR;
14411                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14412                                        pci_state_reg);
14413
14414                 tg3_ape_lock_init(tp);
14415         }
14416
14417         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14418             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14419             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14420             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14421             tg3_flag(tp, 57765_PLUS))
14422                 tg3_flag_set(tp, CPMU_PRESENT);
14423
14424         /* Set up tp->grc_local_ctrl before calling
14425          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14426          * will bring 5700's external PHY out of reset.
14427          * It is also used as eeprom write protect on LOMs.
14428          */
14429         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14430         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14431             tg3_flag(tp, EEPROM_WRITE_PROT))
14432                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14433                                        GRC_LCLCTRL_GPIO_OUTPUT1);
14434         /* Unused GPIO3 must be driven as output on 5752 because there
14435          * are no pull-up resistors on unused GPIO pins.
14436          */
14437         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14438                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14439
14440         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14441             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14442             tg3_flag(tp, 57765_CLASS))
14443                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14444
14445         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14446             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14447                 /* Turn off the debug UART. */
14448                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14449                 if (tg3_flag(tp, IS_NIC))
14450                         /* Keep VMain power. */
14451                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14452                                               GRC_LCLCTRL_GPIO_OUTPUT0;
14453         }
14454
14455         /* Switch out of Vaux if it is a NIC */
14456         tg3_pwrsrc_switch_to_vmain(tp);
14457
14458         /* Derive initial jumbo mode from MTU assigned in
14459          * ether_setup() via the alloc_etherdev() call
14460          */
14461         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14462                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14463
14464         /* Determine WakeOnLan speed to use. */
14465         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14466             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14467             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14468             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14469                 tg3_flag_clear(tp, WOL_SPEED_100MB);
14470         } else {
14471                 tg3_flag_set(tp, WOL_SPEED_100MB);
14472         }
14473
14474         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14475                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14476
14477         /* A few boards don't want Ethernet@WireSpeed phy feature */
14478         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14479             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14480              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14481              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14482             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14483             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14484                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14485
14486         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14487             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14488                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14489         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14490                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14491
14492         if (tg3_flag(tp, 5705_PLUS) &&
14493             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14494             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14495             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14496             !tg3_flag(tp, 57765_PLUS)) {
14497                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14498                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14499                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14500                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14501                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14502                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14503                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14504                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14505                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14506                 } else
14507                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14508         }
14509
14510         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14511             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14512                 tp->phy_otp = tg3_read_otp_phycfg(tp);
14513                 if (tp->phy_otp == 0)
14514                         tp->phy_otp = TG3_OTP_DEFAULT;
14515         }
14516
14517         if (tg3_flag(tp, CPMU_PRESENT))
14518                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14519         else
14520                 tp->mi_mode = MAC_MI_MODE_BASE;
14521
14522         tp->coalesce_mode = 0;
14523         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14524             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14525                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14526
14527         /* Set these bits to enable statistics workaround. */
14528         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14529             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14530             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14531                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14532                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14533         }
14534
14535         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14536             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14537                 tg3_flag_set(tp, USE_PHYLIB);
14538
14539         err = tg3_mdio_init(tp);
14540         if (err)
14541                 return err;
14542
14543         /* Initialize data/descriptor byte/word swapping. */
14544         val = tr32(GRC_MODE);
14545         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14546                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14547                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
14548                         GRC_MODE_B2HRX_ENABLE |
14549                         GRC_MODE_HTX2B_ENABLE |
14550                         GRC_MODE_HOST_STACKUP);
14551         else
14552                 val &= GRC_MODE_HOST_STACKUP;
14553
14554         tw32(GRC_MODE, val | tp->grc_mode);
14555
14556         tg3_switch_clocks(tp);
14557
14558         /* Clear this out for sanity. */
14559         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14560
14561         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14562                               &pci_state_reg);
14563         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14564             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14565                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14566
14567                 if (chiprevid == CHIPREV_ID_5701_A0 ||
14568                     chiprevid == CHIPREV_ID_5701_B0 ||
14569                     chiprevid == CHIPREV_ID_5701_B2 ||
14570                     chiprevid == CHIPREV_ID_5701_B5) {
14571                         void __iomem *sram_base;
14572
14573                         /* Write some dummy words into the SRAM status block
14574                          * area and see if they read back correctly.  If the
14575                          * values read back are bad, force-enable the PCI-X workaround.
14576                          */
14577                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14578
14579                         writel(0x00000000, sram_base);
14580                         writel(0x00000000, sram_base + 4);
14581                         writel(0xffffffff, sram_base + 4);
14582                         if (readl(sram_base) != 0x00000000)
14583                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14584                 }
14585         }
14586
14587         udelay(50);
14588         tg3_nvram_init(tp);
14589
14590         grc_misc_cfg = tr32(GRC_MISC_CFG);
14591         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14592
14593         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14594             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14595              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14596                 tg3_flag_set(tp, IS_5788);
14597
14598         if (!tg3_flag(tp, IS_5788) &&
14599             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14600                 tg3_flag_set(tp, TAGGED_STATUS);
14601         if (tg3_flag(tp, TAGGED_STATUS)) {
14602                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14603                                       HOSTCC_MODE_CLRTICK_TXBD);
14604
14605                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14606                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14607                                        tp->misc_host_ctrl);
14608         }
14609
14610         /* Preserve the APE MAC_MODE bits */
14611         if (tg3_flag(tp, ENABLE_APE))
14612                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14613         else
14614                 tp->mac_mode = 0;
14615
14616         /* these are limited to 10/100 only */
14617         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14618              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14619             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14620              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14621              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14622               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14623               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14624             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14625              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14626               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14627               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14628             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14629             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14630             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14631             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14632                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14633
14634         err = tg3_phy_probe(tp);
14635         if (err) {
14636                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14637                 /* ... but do not return immediately ... */
14638                 tg3_mdio_fini(tp);
14639         }
14640
14641         tg3_read_vpd(tp);
14642         tg3_read_fw_ver(tp);
14643
14644         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14645                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14646         } else {
14647                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14648                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14649                 else
14650                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14651         }
14652
14653         /* 5700 {AX,BX} chips have a broken status block link
14654          * change bit implementation, so we must use the
14655          * status register in those cases.
14656          */
14657         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14658                 tg3_flag_set(tp, USE_LINKCHG_REG);
14659         else
14660                 tg3_flag_clear(tp, USE_LINKCHG_REG);
14661
14662         /* The led_ctrl is set during tg3_phy_probe; here we might
14663          * have to force the link status polling mechanism based
14664          * upon subsystem IDs.
14665          */
14666         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14667             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14668             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14669                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14670                 tg3_flag_set(tp, USE_LINKCHG_REG);
14671         }
14672
14673         /* For all SERDES we poll the MAC status register. */
14674         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14675                 tg3_flag_set(tp, POLL_SERDES);
14676         else
14677                 tg3_flag_clear(tp, POLL_SERDES);
14678
14679         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
14680         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14681         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14682             tg3_flag(tp, PCIX_MODE)) {
14683                 tp->rx_offset = NET_SKB_PAD;
14684 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14685                 tp->rx_copy_thresh = ~(u16)0;
14686 #endif
14687         }
14688
14689         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14690         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14691         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14692
14693         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14694
14695         /* Increment the rx prod index on the rx std ring by at most
14696          * 8 for these chips to workaround hw errata.
14697          * 8 for these chips to work around hw errata.
14698         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14699             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14700             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14701                 tp->rx_std_max_post = 8;
14702
14703         if (tg3_flag(tp, ASPM_WORKAROUND))
14704                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14705                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
14706
14707         return err;
14708 }
14709
14710 #ifdef CONFIG_SPARC
14711 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14712 {
14713         struct net_device *dev = tp->dev;
14714         struct pci_dev *pdev = tp->pdev;
14715         struct device_node *dp = pci_device_to_OF_node(pdev);
14716         const unsigned char *addr;
14717         int len;
14718
14719         addr = of_get_property(dp, "local-mac-address", &len);
14720         if (addr && len == 6) {
14721                 memcpy(dev->dev_addr, addr, 6);
14722                 memcpy(dev->perm_addr, dev->dev_addr, 6);
14723                 return 0;
14724         }
14725         return -ENODEV;
14726 }
14727
14728 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14729 {
14730         struct net_device *dev = tp->dev;
14731
14732         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14733         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14734         return 0;
14735 }
14736 #endif
14737
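      /* Determine the permanent MAC address: try the firmware device
       * tree on SPARC, then the SRAM mailbox, then NVRAM, and finally
       * the MAC address registers themselves.
       */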
14738 static int __devinit tg3_get_device_address(struct tg3 *tp)
14739 {
14740         struct net_device *dev = tp->dev;
14741         u32 hi, lo, mac_offset;
14742         int addr_ok = 0;
14743
14744 #ifdef CONFIG_SPARC
14745         if (!tg3_get_macaddr_sparc(tp))
14746                 return 0;
14747 #endif
14748
14749         mac_offset = 0x7c;
14750         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14751             tg3_flag(tp, 5780_CLASS)) {
14752                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14753                         mac_offset = 0xcc;
14754                 if (tg3_nvram_lock(tp))
14755                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14756                 else
14757                         tg3_nvram_unlock(tp);
14758         } else if (tg3_flag(tp, 5717_PLUS)) {
14759                 if (tp->pci_fn & 1)
14760                         mac_offset = 0xcc;
14761                 if (tp->pci_fn > 1)
14762                         mac_offset += 0x18c;
14763         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14764                 mac_offset = 0x10;
14765
14766         /* First try to get it from MAC address mailbox. */
14767         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14768         if ((hi >> 16) == 0x484b) {
14769                 dev->dev_addr[0] = (hi >>  8) & 0xff;
14770                 dev->dev_addr[1] = (hi >>  0) & 0xff;
14771
14772                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14773                 dev->dev_addr[2] = (lo >> 24) & 0xff;
14774                 dev->dev_addr[3] = (lo >> 16) & 0xff;
14775                 dev->dev_addr[4] = (lo >>  8) & 0xff;
14776                 dev->dev_addr[5] = (lo >>  0) & 0xff;
14777
14778                 /* Some old bootcode may report a 0 MAC address in SRAM */
14779                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14780         }
14781         if (!addr_ok) {
14782                 /* Next, try NVRAM. */
14783                 if (!tg3_flag(tp, NO_NVRAM) &&
14784                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14785                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14786                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14787                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14788                 }
14789                 /* Finally just fetch it out of the MAC control regs. */
14790                 else {
14791                         hi = tr32(MAC_ADDR_0_HIGH);
14792                         lo = tr32(MAC_ADDR_0_LOW);
14793
14794                         dev->dev_addr[5] = lo & 0xff;
14795                         dev->dev_addr[4] = (lo >> 8) & 0xff;
14796                         dev->dev_addr[3] = (lo >> 16) & 0xff;
14797                         dev->dev_addr[2] = (lo >> 24) & 0xff;
14798                         dev->dev_addr[1] = hi & 0xff;
14799                         dev->dev_addr[0] = (hi >> 8) & 0xff;
14800                 }
14801         }
14802
14803         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14804 #ifdef CONFIG_SPARC
14805                 if (!tg3_get_default_macaddr_sparc(tp))
14806                         return 0;
14807 #endif
14808                 return -EINVAL;
14809         }
14810         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14811         return 0;
14812 }
14813
14814 #define BOUNDARY_SINGLE_CACHELINE       1
14815 #define BOUNDARY_MULTI_CACHELINE        2
14816
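      /* Compute the DMA read/write boundary bits for DMA_RWCTRL from
       * the host cache line size and the bus type.
       */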
14817 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14818 {
14819         int cacheline_size;
14820         u8 byte;
14821         int goal;
14822
14823         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14824         if (byte == 0)
14825                 cacheline_size = 1024;
14826         else
14827                 cacheline_size = (int) byte * 4;
14828
14829         /* On 5703 and later chips, the boundary bits have no
14830          * effect.
14831          */
14832         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14833             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14834             !tg3_flag(tp, PCI_EXPRESS))
14835                 goto out;
14836
14837 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14838         goal = BOUNDARY_MULTI_CACHELINE;
14839 #else
14840 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14841         goal = BOUNDARY_SINGLE_CACHELINE;
14842 #else
14843         goal = 0;
14844 #endif
14845 #endif
14846
14847         if (tg3_flag(tp, 57765_PLUS)) {
14848                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14849                 goto out;
14850         }
14851
14852         if (!goal)
14853                 goto out;
14854
14855         /* PCI controllers on most RISC systems tend to disconnect
14856          * when a device tries to burst across a cache-line boundary.
14857          * Therefore, letting tg3 do so just wastes PCI bandwidth.
14858          *
14859          * Unfortunately, for PCI-E there are only limited
14860          * write-side controls for this, and thus for reads
14861          * we will still get the disconnects.  We'll also waste
14862          * these PCI cycles for both read and write for chips
14863          * other than 5700 and 5701 which do not implement the
14864          * boundary bits.
14865          */
14866         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14867                 switch (cacheline_size) {
14868                 case 16:
14869                 case 32:
14870                 case 64:
14871                 case 128:
14872                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14873                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14874                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14875                         } else {
14876                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14877                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14878                         }
14879                         break;
14880
14881                 case 256:
14882                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14883                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14884                         break;
14885
14886                 default:
14887                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14888                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14889                         break;
14890                 }
14891         } else if (tg3_flag(tp, PCI_EXPRESS)) {
14892                 switch (cacheline_size) {
14893                 case 16:
14894                 case 32:
14895                 case 64:
14896                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14897                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14898                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14899                                 break;
14900                         }
14901                         /* fallthrough */
14902                 case 128:
14903                 default:
14904                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14905                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14906                         break;
14907                 }
14908         } else {
14909                 switch (cacheline_size) {
14910                 case 16:
14911                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14912                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14913                                         DMA_RWCTRL_WRITE_BNDRY_16);
14914                                 break;
14915                         }
14916                         /* fallthrough */
14917                 case 32:
14918                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14919                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14920                                         DMA_RWCTRL_WRITE_BNDRY_32);
14921                                 break;
14922                         }
14923                         /* fallthrough */
14924                 case 64:
14925                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14926                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14927                                         DMA_RWCTRL_WRITE_BNDRY_64);
14928                                 break;
14929                         }
14930                         /* fallthrough */
14931                 case 128:
14932                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14933                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14934                                         DMA_RWCTRL_WRITE_BNDRY_128);
14935                                 break;
14936                         }
14937                         /* fallthrough */
14938                 case 256:
14939                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
14940                                 DMA_RWCTRL_WRITE_BNDRY_256);
14941                         break;
14942                 case 512:
14943                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
14944                                 DMA_RWCTRL_WRITE_BNDRY_512);
14945                         break;
14946                 case 1024:
14947                 default:
14948                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14949                                 DMA_RWCTRL_WRITE_BNDRY_1024);
14950                         break;
14951                 }
14952         }
14953
14954 out:
14955         return val;
14956 }
14957
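      /* Run one transfer through the chip's DMA engine: build a
       * descriptor in NIC SRAM via the config-space memory window, kick
       * the appropriate DMA FIFO, then poll the completion FIFO for up
       * to ~4ms.  Returns 0 on success, -ENODEV on timeout.
       */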
14958 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14959 {
14960         struct tg3_internal_buffer_desc test_desc;
14961         u32 sram_dma_descs;
14962         int i, ret;
14963
14964         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14965
14966         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14967         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14968         tw32(RDMAC_STATUS, 0);
14969         tw32(WDMAC_STATUS, 0);
14970
14971         tw32(BUFMGR_MODE, 0);
14972         tw32(FTQ_RESET, 0);
14973
14974         test_desc.addr_hi = ((u64) buf_dma) >> 32;
14975         test_desc.addr_lo = buf_dma & 0xffffffff;
14976         test_desc.nic_mbuf = 0x00002100;
14977         test_desc.len = size;
14978
14979         /*
14980          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
14981          * the *second* time the tg3 driver was loaded after an
14982          * initial scan.
14983          *
14984          * Broadcom tells me:
14985          *   ...the DMA engine is connected to the GRC block and a DMA
14986          *   reset may affect the GRC block in some unpredictable way...
14987          *   The behavior of resets to individual blocks has not been tested.
14988          *
14989          * Broadcom noted the GRC reset will also reset all sub-components.
14990          */
14991         if (to_device) {
14992                 test_desc.cqid_sqid = (13 << 8) | 2;
14993
14994                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14995                 udelay(40);
14996         } else {
14997                 test_desc.cqid_sqid = (16 << 8) | 7;
14998
14999                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
15000                 udelay(40);
15001         }
15002         test_desc.flags = 0x00000005;
15003
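        /* Copy the test descriptor into NIC SRAM one 32-bit word at a
         * time through the indirect memory window in PCI config space:
         * aim TG3PCI_MEM_WIN_BASE_ADDR at the target word, then write
         * the word via TG3PCI_MEM_WIN_DATA.
         */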
15004         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
15005                 u32 val;
15006
15007                 val = *(((u32 *)&test_desc) + i);
15008                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
15009                                        sram_dma_descs + (i * sizeof(u32)));
15010                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
15011         }
15012         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
15013
15014         if (to_device)
15015                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
15016         else
15017                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
15018
15019         ret = -ENODEV;
15020         for (i = 0; i < 40; i++) {
15021                 u32 val;
15022
15023                 if (to_device)
15024                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
15025                 else
15026                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
15027                 if ((val & 0xffff) == sram_dma_descs) {
15028                         ret = 0;
15029                         break;
15030                 }
15031
15032                 udelay(100);
15033         }
15034
15035         return ret;
15036 }
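
/* Usage sketch (this is exactly what tg3_test_dma() below does): DMA a
 * pattern buffer to the chip, then DMA it back and compare:
 *
 *	err = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
 *	if (!err)
 *		err = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
 */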
15037
15038 #define TEST_BUFFER_SIZE        0x2000
15039
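/* Chipsets known to need the conservative 16-byte DMA write boundary
 * even though they pass the DMA test below; consulted via
 * pci_dev_present() in tg3_test_dma().
 */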
15040 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
15041         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
15042         { },
15043 };
15044
15045 static int __devinit tg3_test_dma(struct tg3 *tp)
15046 {
15047         dma_addr_t buf_dma;
15048         u32 *buf, saved_dma_rwctrl;
15049         int ret = 0;
15050
15051         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
15052                                  &buf_dma, GFP_KERNEL);
15053         if (!buf) {
15054                 ret = -ENOMEM;
15055                 goto out_nofree;
15056         }
15057
15058         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
15059                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
15060
15061         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
15062
15063         if (tg3_flag(tp, 57765_PLUS))
15064                 goto out;
15065
15066         if (tg3_flag(tp, PCI_EXPRESS)) {
15067                 /* DMA read watermark not used on PCIE */
15068                 tp->dma_rwctrl |= 0x00180000;
15069         } else if (!tg3_flag(tp, PCIX_MODE)) {
15070                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
15071                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
15072                         tp->dma_rwctrl |= 0x003f0000;
15073                 else
15074                         tp->dma_rwctrl |= 0x003f000f;
15075         } else {
15076                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15077                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
15078                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
15079                         u32 read_water = 0x7;
15080
15081                         /* If the 5704 is behind the EPB bridge, we can
15082                          * do the less restrictive ONE_DMA workaround for
15083                          * better performance.
15084                          */
15085                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
15086                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15087                                 tp->dma_rwctrl |= 0x8000;
15088                         else if (ccval == 0x6 || ccval == 0x7)
15089                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
15090
15091                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
15092                                 read_water = 4;
15093                         /* Set bit 23 to enable PCIX hw bug fix */
15094                         tp->dma_rwctrl |=
15095                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
15096                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
15097                                 (1 << 23);
15098                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
15099                         /* 5780 always in PCIX mode */
15100                         tp->dma_rwctrl |= 0x00144000;
15101                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
15102                         /* 5714 always in PCIX mode */
15103                         tp->dma_rwctrl |= 0x00148000;
15104                 } else {
15105                         tp->dma_rwctrl |= 0x001b000f;
15106                 }
15107         }
15108
15109         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15110             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15111                 tp->dma_rwctrl &= 0xfffffff0;
15112
15113         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15114             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15115                 /* Remove this if it causes problems for some boards. */
15116                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
15117
15118                 /* On 5700/5701 chips, we need to set this bit.
15119                  * Otherwise the chip will issue cacheline transactions
15120                  * to streamable DMA memory without all of the byte
15121                  * enables turned on.  This is an error on several
15122                  * RISC PCI controllers, in particular sparc64.
15123                  *
15124                  * On 5703/5704 chips, this bit has been reassigned
15125                  * a different meaning.  In particular, it is used
15126                  * on those chips to enable a PCI-X workaround.
15127                  */
15128                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
15129         }
15130
15131         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15132
15133 #if 0
15134         /* Unneeded, already done by tg3_get_invariants.  */
15135         tg3_switch_clocks(tp);
15136 #endif
15137
15138         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15139             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
15140                 goto out;
15141
15142         /* It is best to perform the DMA test with the maximum write
15143          * burst size to expose the 5700/5701 write DMA bug.
15144          */
15145         saved_dma_rwctrl = tp->dma_rwctrl;
15146         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15147         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15148
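        /* Fill the buffer with a known pattern, DMA it to the chip,
         * DMA it back, and verify.  On the first mismatch, retry once
         * with the conservative 16-byte write boundary; a mismatch with
         * that boundary already in place is a hard failure.
         */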
15149         while (1) {
15150                 u32 *p = buf, i;
15151
15152                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15153                         p[i] = i;
15154
15155                 /* Send the buffer to the chip. */
15156                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15157                 if (ret) {
15158                         dev_err(&tp->pdev->dev,
15159                                 "%s: Buffer write failed. err = %d\n",
15160                                 __func__, ret);
15161                         break;
15162                 }
15163
15164 #if 0
15165                 /* Validate that the data reached card RAM correctly. */
15166                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15167                         u32 val;
15168                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
15169                         if (le32_to_cpu(val) != p[i]) {
15170                                 dev_err(&tp->pdev->dev,
15171                                         "%s: Buffer corrupted on device! "
15172                                         "(%d != %d)\n", __func__, val, i);
15173                                 /* ret = -ENODEV here? */
15174                         }
15175                         p[i] = 0;
15176                 }
15177 #endif
15178                 /* Now read it back. */
15179                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15180                 if (ret) {
15181                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15182                                 "err = %d\n", __func__, ret);
15183                         break;
15184                 }
15185
15186                 /* Verify it. */
15187                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15188                         if (p[i] == i)
15189                                 continue;
15190
15191                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15192                             DMA_RWCTRL_WRITE_BNDRY_16) {
15193                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15194                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15195                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15196                                 break;
15197                         } else {
15198                                 dev_err(&tp->pdev->dev,
15199                                         "%s: Buffer corrupted on read back! "
15200                                         "(%d != %d)\n", __func__, p[i], i);
15201                                 ret = -ENODEV;
15202                                 goto out;
15203                         }
15204                 }
15205
15206                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15207                         /* Success. */
15208                         ret = 0;
15209                         break;
15210                 }
15211         }
15212         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15213             DMA_RWCTRL_WRITE_BNDRY_16) {
15214                 /* DMA test passed without adjusting the DMA boundary;
15215                  * now look for chipsets that are known to expose the
15216                  * DMA bug without failing the test.
15217                  */
15218                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15219                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15220                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15221                 } else {
15222                         /* Safe to use the calculated DMA boundary. */
15223                         tp->dma_rwctrl = saved_dma_rwctrl;
15224                 }
15225
15226                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15227         }
15228
15229 out:
15230         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15231 out_nofree:
15232         return ret;
15233 }
15234
15235 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15236 {
15237         if (tg3_flag(tp, 57765_PLUS)) {
15238                 tp->bufmgr_config.mbuf_read_dma_low_water =
15239                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15240                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15241                         DEFAULT_MB_MACRX_LOW_WATER_57765;
15242                 tp->bufmgr_config.mbuf_high_water =
15243                         DEFAULT_MB_HIGH_WATER_57765;
15244
15245                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15246                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15247                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15248                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15249                 tp->bufmgr_config.mbuf_high_water_jumbo =
15250                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15251         } else if (tg3_flag(tp, 5705_PLUS)) {
15252                 tp->bufmgr_config.mbuf_read_dma_low_water =
15253                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15254                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15255                         DEFAULT_MB_MACRX_LOW_WATER_5705;
15256                 tp->bufmgr_config.mbuf_high_water =
15257                         DEFAULT_MB_HIGH_WATER_5705;
15258                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15259                         tp->bufmgr_config.mbuf_mac_rx_low_water =
15260                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
15261                         tp->bufmgr_config.mbuf_high_water =
15262                                 DEFAULT_MB_HIGH_WATER_5906;
15263                 }
15264
15265                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15266                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15267                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15268                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15269                 tp->bufmgr_config.mbuf_high_water_jumbo =
15270                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15271         } else {
15272                 tp->bufmgr_config.mbuf_read_dma_low_water =
15273                         DEFAULT_MB_RDMA_LOW_WATER;
15274                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15275                         DEFAULT_MB_MACRX_LOW_WATER;
15276                 tp->bufmgr_config.mbuf_high_water =
15277                         DEFAULT_MB_HIGH_WATER;
15278
15279                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15280                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15281                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15282                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15283                 tp->bufmgr_config.mbuf_high_water_jumbo =
15284                         DEFAULT_MB_HIGH_WATER_JUMBO;
15285         }
15286
15287         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15288         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15289 }
15290
15291 static char * __devinit tg3_phy_string(struct tg3 *tp)
15292 {
15293         switch (tp->phy_id & TG3_PHY_ID_MASK) {
15294         case TG3_PHY_ID_BCM5400:        return "5400";
15295         case TG3_PHY_ID_BCM5401:        return "5401";
15296         case TG3_PHY_ID_BCM5411:        return "5411";
15297         case TG3_PHY_ID_BCM5701:        return "5701";
15298         case TG3_PHY_ID_BCM5703:        return "5703";
15299         case TG3_PHY_ID_BCM5704:        return "5704";
15300         case TG3_PHY_ID_BCM5705:        return "5705";
15301         case TG3_PHY_ID_BCM5750:        return "5750";
15302         case TG3_PHY_ID_BCM5752:        return "5752";
15303         case TG3_PHY_ID_BCM5714:        return "5714";
15304         case TG3_PHY_ID_BCM5780:        return "5780";
15305         case TG3_PHY_ID_BCM5755:        return "5755";
15306         case TG3_PHY_ID_BCM5787:        return "5787";
15307         case TG3_PHY_ID_BCM5784:        return "5784";
15308         case TG3_PHY_ID_BCM5756:        return "5722/5756";
15309         case TG3_PHY_ID_BCM5906:        return "5906";
15310         case TG3_PHY_ID_BCM5761:        return "5761";
15311         case TG3_PHY_ID_BCM5718C:       return "5718C";
15312         case TG3_PHY_ID_BCM5718S:       return "5718S";
15313         case TG3_PHY_ID_BCM57765:       return "57765";
15314         case TG3_PHY_ID_BCM5719C:       return "5719C";
15315         case TG3_PHY_ID_BCM5720C:       return "5720C";
15316         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
15317         case 0:                 return "serdes";
15318         default:                return "unknown";
15319         }
15320 }
15321
15322 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15323 {
15324         if (tg3_flag(tp, PCI_EXPRESS)) {
15325                 strcpy(str, "PCI Express");
15326                 return str;
15327         } else if (tg3_flag(tp, PCIX_MODE)) {
15328                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15329
15330                 strcpy(str, "PCIX:");
15331
15332                 if ((clock_ctrl == 7) ||
15333                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15334                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15335                         strcat(str, "133MHz");
15336                 else if (clock_ctrl == 0)
15337                         strcat(str, "33MHz");
15338                 else if (clock_ctrl == 2)
15339                         strcat(str, "50MHz");
15340                 else if (clock_ctrl == 4)
15341                         strcat(str, "66MHz");
15342                 else if (clock_ctrl == 6)
15343                         strcat(str, "100MHz");
15344         } else {
15345                 strcpy(str, "PCI:");
15346                 if (tg3_flag(tp, PCI_HIGH_SPEED))
15347                         strcat(str, "66MHz");
15348                 else
15349                         strcat(str, "33MHz");
15350         }
15351         if (tg3_flag(tp, PCI_32BIT))
15352                 strcat(str, ":32-bit");
15353         else
15354                 strcat(str, ":64-bit");
15355         return str;
15356 }
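
/* Example outputs (illustrative): "PCI Express", "PCIX:133MHz:64-bit",
 * "PCI:66MHz:32-bit".  Note the PCI Express case returns early and gets
 * no width suffix.  The caller supplies the buffer; tg3_init_one()
 * below passes a char str[40].
 */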
15357
15358 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
15359 {
15360         struct pci_dev *peer;
15361         unsigned int func, devnr = tp->pdev->devfn & ~7;
15362
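        /* devfn & ~7 masks off the three PCI function bits, yielding
         * function 0 of this device; scan all eight functions looking
         * for the other port of a dual-port board.
         */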
15363         for (func = 0; func < 8; func++) {
15364                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15365                 if (peer && peer != tp->pdev)
15366                         break;
15367                 pci_dev_put(peer);
15368         }
15369         /* The 5704 can be configured in single-port mode; set peer to
15370          * tp->pdev in that case.
15371          */
15372         if (!peer) {
15373                 peer = tp->pdev;
15374                 return peer;
15375         }
15376
15377         /*
15378          * We don't need to keep the refcount elevated; there's no way
15379          * to remove one half of this device without removing the other.
15380          */
15381         pci_dev_put(peer);
15382
15383         return peer;
15384 }
15385
15386 static void __devinit tg3_init_coal(struct tg3 *tp)
15387 {
15388         struct ethtool_coalesce *ec = &tp->coal;
15389
15390         memset(ec, 0, sizeof(*ec));
15391         ec->cmd = ETHTOOL_GCOALESCE;
15392         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15393         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15394         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15395         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15396         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15397         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15398         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15399         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15400         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15401
15402         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15403                                  HOSTCC_MODE_CLRTICK_TXBD)) {
15404                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15405                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15406                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15407                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15408         }
15409
15410         if (tg3_flag(tp, 5705_PLUS)) {
15411                 ec->rx_coalesce_usecs_irq = 0;
15412                 ec->tx_coalesce_usecs_irq = 0;
15413                 ec->stats_block_coalesce_usecs = 0;
15414         }
15415 }
15416
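/* Standard net_device_ops table; attached to the netdev in
 * tg3_init_one() via dev->netdev_ops = &tg3_netdev_ops.
 */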
15417 static const struct net_device_ops tg3_netdev_ops = {
15418         .ndo_open               = tg3_open,
15419         .ndo_stop               = tg3_close,
15420         .ndo_start_xmit         = tg3_start_xmit,
15421         .ndo_get_stats64        = tg3_get_stats64,
15422         .ndo_validate_addr      = eth_validate_addr,
15423         .ndo_set_rx_mode        = tg3_set_rx_mode,
15424         .ndo_set_mac_address    = tg3_set_mac_addr,
15425         .ndo_do_ioctl           = tg3_ioctl,
15426         .ndo_tx_timeout         = tg3_tx_timeout,
15427         .ndo_change_mtu         = tg3_change_mtu,
15428         .ndo_fix_features       = tg3_fix_features,
15429         .ndo_set_features       = tg3_set_features,
15430 #ifdef CONFIG_NET_POLL_CONTROLLER
15431         .ndo_poll_controller    = tg3_poll_controller,
15432 #endif
15433 };
15434
15435 static int __devinit tg3_init_one(struct pci_dev *pdev,
15436                                   const struct pci_device_id *ent)
15437 {
15438         struct net_device *dev;
15439         struct tg3 *tp;
15440         int i, err, pm_cap;
15441         u32 sndmbx, rcvmbx, intmbx;
15442         char str[40];
15443         u64 dma_mask, persist_dma_mask;
15444         netdev_features_t features = 0;
15445
15446         printk_once(KERN_INFO "%s\n", version);
15447
15448         err = pci_enable_device(pdev);
15449         if (err) {
15450                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15451                 return err;
15452         }
15453
15454         err = pci_request_regions(pdev, DRV_MODULE_NAME);
15455         if (err) {
15456                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15457                 goto err_out_disable_pdev;
15458         }
15459
15460         pci_set_master(pdev);
15461
15462         /* Find power-management capability. */
15463         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15464         if (pm_cap == 0) {
15465                 dev_err(&pdev->dev,
15466                         "Cannot find Power Management capability, aborting\n");
15467                 err = -EIO;
15468                 goto err_out_free_res;
15469         }
15470
15471         err = pci_set_power_state(pdev, PCI_D0);
15472         if (err) {
15473                 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15474                 goto err_out_free_res;
15475         }
15476
15477         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15478         if (!dev) {
15479                 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
15480                 err = -ENOMEM;
15481                 goto err_out_power_down;
15482         }
15483
15484         SET_NETDEV_DEV(dev, &pdev->dev);
15485
15486         tp = netdev_priv(dev);
15487         tp->pdev = pdev;
15488         tp->dev = dev;
15489         tp->pm_cap = pm_cap;
15490         tp->rx_mode = TG3_DEF_RX_MODE;
15491         tp->tx_mode = TG3_DEF_TX_MODE;
15492
15493         if (tg3_debug > 0)
15494                 tp->msg_enable = tg3_debug;
15495         else
15496                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15497
15498         /* The word/byte swap controls here govern register-access byte
15499          * swapping.  DMA data byte swapping is controlled by the GRC_MODE
15500          * setting below.
15501          */
15502         tp->misc_host_ctrl =
15503                 MISC_HOST_CTRL_MASK_PCI_INT |
15504                 MISC_HOST_CTRL_WORD_SWAP |
15505                 MISC_HOST_CTRL_INDIR_ACCESS |
15506                 MISC_HOST_CTRL_PCISTATE_RW;
15507
15508         /* The NONFRM (non-frame) byte/word swap controls take effect
15509          * on descriptor entries, i.e., anything which isn't packet data.
15510          *
15511          * The StrongARM chips on the board (one for tx, one for rx)
15512          * are running in big-endian mode.
15513          */
15514         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15515                         GRC_MODE_WSWAP_NONFRM_DATA);
15516 #ifdef __BIG_ENDIAN
15517         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15518 #endif
15519         spin_lock_init(&tp->lock);
15520         spin_lock_init(&tp->indirect_lock);
15521         INIT_WORK(&tp->reset_task, tg3_reset_task);
15522
15523         tp->regs = pci_ioremap_bar(pdev, BAR_0);
15524         if (!tp->regs) {
15525                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15526                 err = -ENOMEM;
15527                 goto err_out_free_dev;
15528         }
15529
15530         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15531             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15532             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15533             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15534             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15535             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15536             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15537             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15538                 tg3_flag_set(tp, ENABLE_APE);
15539                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15540                 if (!tp->aperegs) {
15541                         dev_err(&pdev->dev,
15542                                 "Cannot map APE registers, aborting\n");
15543                         err = -ENOMEM;
15544                         goto err_out_iounmap;
15545                 }
15546         }
15547
15548         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15549         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15550
15551         dev->ethtool_ops = &tg3_ethtool_ops;
15552         dev->watchdog_timeo = TG3_TX_TIMEOUT;
15553         dev->netdev_ops = &tg3_netdev_ops;
15554         dev->irq = pdev->irq;
15555
15556         err = tg3_get_invariants(tp);
15557         if (err) {
15558                 dev_err(&pdev->dev,
15559                         "Problem fetching invariants of chip, aborting\n");
15560                 goto err_out_apeunmap;
15561         }
15562
15563         /* The EPB bridge inside 5714, 5715, and 5780 and any
15564          * device behind the EPB cannot support DMA addresses > 40-bit.
15565          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15566          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15567          * do DMA address check in tg3_start_xmit().
15568          */
15569         if (tg3_flag(tp, IS_5788))
15570                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15571         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15572                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15573 #ifdef CONFIG_HIGHMEM
15574                 dma_mask = DMA_BIT_MASK(64);
15575 #endif
15576         } else
15577                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15578
15579         /* Configure DMA attributes. */
15580         if (dma_mask > DMA_BIT_MASK(32)) {
15581                 err = pci_set_dma_mask(pdev, dma_mask);
15582                 if (!err) {
15583                         features |= NETIF_F_HIGHDMA;
15584                         err = pci_set_consistent_dma_mask(pdev,
15585                                                           persist_dma_mask);
15586                         if (err < 0) {
15587                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15588                                         "DMA for consistent allocations\n");
15589                                 goto err_out_apeunmap;
15590                         }
15591                 }
15592         }
15593         if (err || dma_mask == DMA_BIT_MASK(32)) {
15594                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15595                 if (err) {
15596                         dev_err(&pdev->dev,
15597                                 "No usable DMA configuration, aborting\n");
15598                         goto err_out_apeunmap;
15599                 }
15600         }
15601
15602         tg3_init_bufmgr_config(tp);
15603
15604         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15605
15606         /* 5700 B0 chips do not support checksumming correctly due
15607          * to hardware bugs.
15608          */
15609         if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15610                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15611
15612                 if (tg3_flag(tp, 5755_PLUS))
15613                         features |= NETIF_F_IPV6_CSUM;
15614         }
15615
15616         /* TSO is on by default on chips that support hardware TSO.
15617          * Firmware TSO on older chips gives lower performance, so it
15618          * is off by default, but can be enabled using ethtool.
15619          */
15620         if ((tg3_flag(tp, HW_TSO_1) ||
15621              tg3_flag(tp, HW_TSO_2) ||
15622              tg3_flag(tp, HW_TSO_3)) &&
15623             (features & NETIF_F_IP_CSUM))
15624                 features |= NETIF_F_TSO;
15625         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15626                 if (features & NETIF_F_IPV6_CSUM)
15627                         features |= NETIF_F_TSO6;
15628                 if (tg3_flag(tp, HW_TSO_3) ||
15629                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15630                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15631                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15632                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15633                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15634                         features |= NETIF_F_TSO_ECN;
15635         }
15636
15637         dev->features |= features;
15638         dev->vlan_features |= features;
15639
15640         /*
15641          * Add loopback capability only for a subset of devices that support
15642          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15643          * loopback for the remaining devices.
15644          */
15645         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15646             !tg3_flag(tp, CPMU_PRESENT))
15647                 /* Add the loopback capability */
15648                 features |= NETIF_F_LOOPBACK;
15649
15650         dev->hw_features |= features;
15651
15652         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15653             !tg3_flag(tp, TSO_CAPABLE) &&
15654             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15655                 tg3_flag_set(tp, MAX_RXPEND_64);
15656                 tp->rx_pending = 63;
15657         }
15658
15659         err = tg3_get_device_address(tp);
15660         if (err) {
15661                 dev_err(&pdev->dev,
15662                         "Could not obtain valid ethernet address, aborting\n");
15663                 goto err_out_apeunmap;
15664         }
15665
15666         /*
15667          * Reset the chip in case the UNDI or EFI driver did not shut it
15668          * down; otherwise the DMA self test will enable WDMAC and we'll
15669          * see (spurious) pending DMA on the PCI bus at that point.
15670          */
15671         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15672             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15673                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15674                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15675         }
15676
15677         err = tg3_test_dma(tp);
15678         if (err) {
15679                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15680                 goto err_out_apeunmap;
15681         }
15682
15683         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15684         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15685         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15686         for (i = 0; i < tp->irq_max; i++) {
15687                 struct tg3_napi *tnapi = &tp->napi[i];
15688
15689                 tnapi->tp = tp;
15690                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15691
15692                 tnapi->int_mbox = intmbx;
15693                 if (i <= 4)
15694                         intmbx += 0x8;
15695                 else
15696                         intmbx += 0x4;
15697
15698                 tnapi->consmbox = rcvmbx;
15699                 tnapi->prodmbox = sndmbx;
15700
15701                 if (i)
15702                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15703                 else
15704                         tnapi->coal_now = HOSTCC_MODE_NOW;
15705
15706                 if (!tg3_flag(tp, SUPPORT_MSIX))
15707                         break;
15708
15709                 /*
15710                  * If we support MSIX, we'll be using RSS.  If we're using
15711                  * RSS, the first vector only handles link interrupts and the
15712                  * remaining vectors handle rx and tx interrupts.  Reuse the
15713                  * mailbox values for the next iteration.  The values we set up
15714                  * above are still useful for the single-vector mode.
15715                  */
15716                 if (!i)
15717                         continue;
15718
15719                 rcvmbx += 0x8;
15720
15721                 if (sndmbx & 0x4)
15722                         sndmbx -= 0x4;
15723                 else
15724                         sndmbx += 0xc;
15725         }
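
        /* A sketch of the resulting MSI-X mailbox layout (derived from
         * the arithmetic above): each vector past the first gets a
         * receive-return mailbox 0x8 beyond the previous one, while the
         * send producer mailbox steps alternate between -0x4 and +0xc,
         * touching both halves of each 64-bit mailbox register.
         */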
15726
15727         tg3_init_coal(tp);
15728
15729         pci_set_drvdata(pdev, dev);
15730
15731         if (tg3_flag(tp, 5717_PLUS)) {
15732                 /* Resume a low-power mode */
15733                 tg3_frob_aux_power(tp, false);
15734         }
15735
15736         err = register_netdev(dev);
15737         if (err) {
15738                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15739                 goto err_out_apeunmap;
15740         }
15741
15742         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15743                     tp->board_part_number,
15744                     tp->pci_chip_rev_id,
15745                     tg3_bus_string(tp, str),
15746                     dev->dev_addr);
15747
15748         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15749                 struct phy_device *phydev;
15750                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15751                 netdev_info(dev,
15752                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15753                             phydev->drv->name, dev_name(&phydev->dev));
15754         } else {
15755                 char *ethtype;
15756
15757                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15758                         ethtype = "10/100Base-TX";
15759                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15760                         ethtype = "1000Base-SX";
15761                 else
15762                         ethtype = "10/100/1000Base-T";
15763
15764                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15765                             "(WireSpeed[%d], EEE[%d])\n",
15766                             tg3_phy_string(tp), ethtype,
15767                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15768                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15769         }
15770
15771         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15772                     (dev->features & NETIF_F_RXCSUM) != 0,
15773                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
15774                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15775                     tg3_flag(tp, ENABLE_ASF) != 0,
15776                     tg3_flag(tp, TSO_CAPABLE) != 0);
15777         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15778                     tp->dma_rwctrl,
15779                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15780                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15781
15782         pci_save_state(pdev);
15783
15784         return 0;
15785
15786 err_out_apeunmap:
15787         if (tp->aperegs) {
15788                 iounmap(tp->aperegs);
15789                 tp->aperegs = NULL;
15790         }
15791
15792 err_out_iounmap:
15793         if (tp->regs) {
15794                 iounmap(tp->regs);
15795                 tp->regs = NULL;
15796         }
15797
15798 err_out_free_dev:
15799         free_netdev(dev);
15800
15801 err_out_power_down:
15802         pci_set_power_state(pdev, PCI_D3hot);
15803
15804 err_out_free_res:
15805         pci_release_regions(pdev);
15806
15807 err_out_disable_pdev:
15808         pci_disable_device(pdev);
15809         pci_set_drvdata(pdev, NULL);
15810         return err;
15811 }
15812
15813 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15814 {
15815         struct net_device *dev = pci_get_drvdata(pdev);
15816
15817         if (dev) {
15818                 struct tg3 *tp = netdev_priv(dev);
15819
15820                 if (tp->fw)
15821                         release_firmware(tp->fw);
15822
15823                 tg3_reset_task_cancel(tp);
15824
15825                 if (tg3_flag(tp, USE_PHYLIB)) {
15826                         tg3_phy_fini(tp);
15827                         tg3_mdio_fini(tp);
15828                 }
15829
15830                 unregister_netdev(dev);
15831                 if (tp->aperegs) {
15832                         iounmap(tp->aperegs);
15833                         tp->aperegs = NULL;
15834                 }
15835                 if (tp->regs) {
15836                         iounmap(tp->regs);
15837                         tp->regs = NULL;
15838                 }
15839                 free_netdev(dev);
15840                 pci_release_regions(pdev);
15841                 pci_disable_device(pdev);
15842                 pci_set_drvdata(pdev, NULL);
15843         }
15844 }
15845
15846 #ifdef CONFIG_PM_SLEEP
15847 static int tg3_suspend(struct device *device)
15848 {
15849         struct pci_dev *pdev = to_pci_dev(device);
15850         struct net_device *dev = pci_get_drvdata(pdev);
15851         struct tg3 *tp = netdev_priv(dev);
15852         int err;
15853
15854         if (!netif_running(dev))
15855                 return 0;
15856
15857         tg3_reset_task_cancel(tp);
15858         tg3_phy_stop(tp);
15859         tg3_netif_stop(tp);
15860
15861         del_timer_sync(&tp->timer);
15862
15863         tg3_full_lock(tp, 1);
15864         tg3_disable_ints(tp);
15865         tg3_full_unlock(tp);
15866
15867         netif_device_detach(dev);
15868
15869         tg3_full_lock(tp, 0);
15870         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15871         tg3_flag_clear(tp, INIT_COMPLETE);
15872         tg3_full_unlock(tp);
15873
15874         err = tg3_power_down_prepare(tp);
15875         if (err) {
15876                 int err2;
15877
15878                 tg3_full_lock(tp, 0);
15879
15880                 tg3_flag_set(tp, INIT_COMPLETE);
15881                 err2 = tg3_restart_hw(tp, 1);
15882                 if (err2)
15883                         goto out;
15884
15885                 tp->timer.expires = jiffies + tp->timer_offset;
15886                 add_timer(&tp->timer);
15887
15888                 netif_device_attach(dev);
15889                 tg3_netif_start(tp);
15890
15891 out:
15892                 tg3_full_unlock(tp);
15893
15894                 if (!err2)
15895                         tg3_phy_start(tp);
15896         }
15897
15898         return err;
15899 }
15900
15901 static int tg3_resume(struct device *device)
15902 {
15903         struct pci_dev *pdev = to_pci_dev(device);
15904         struct net_device *dev = pci_get_drvdata(pdev);
15905         struct tg3 *tp = netdev_priv(dev);
15906         int err;
15907
15908         if (!netif_running(dev))
15909                 return 0;
15910
15911         netif_device_attach(dev);
15912
15913         tg3_full_lock(tp, 0);
15914
15915         tg3_flag_set(tp, INIT_COMPLETE);
15916         err = tg3_restart_hw(tp, 1);
15917         if (err)
15918                 goto out;
15919
15920         tp->timer.expires = jiffies + tp->timer_offset;
15921         add_timer(&tp->timer);
15922
15923         tg3_netif_start(tp);
15924
15925 out:
15926         tg3_full_unlock(tp);
15927
15928         if (!err)
15929                 tg3_phy_start(tp);
15930
15931         return err;
15932 }
15933
15934 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15935 #define TG3_PM_OPS (&tg3_pm_ops)
15936
15937 #else
15938
15939 #define TG3_PM_OPS NULL
15940
15941 #endif /* CONFIG_PM_SLEEP */
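
/* TG3_PM_OPS feeds tg3_driver.driver.pm below; without CONFIG_PM_SLEEP
 * it is NULL and the PCI core skips the suspend/resume callbacks
 * entirely.
 */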
15942
15943 /**
15944  * tg3_io_error_detected - called when PCI error is detected
15945  * @pdev: Pointer to PCI device
15946  * @state: The current pci connection state
15947  *
15948  * This function is called after a PCI bus error affecting
15949  * this device has been detected.
15950  */
15951 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15952                                               pci_channel_state_t state)
15953 {
15954         struct net_device *netdev = pci_get_drvdata(pdev);
15955         struct tg3 *tp = netdev_priv(netdev);
15956         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15957
15958         netdev_info(netdev, "PCI I/O error detected\n");
15959
15960         rtnl_lock();
15961
15962         if (!netif_running(netdev))
15963                 goto done;
15964
15965         tg3_phy_stop(tp);
15966
15967         tg3_netif_stop(tp);
15968
15969         del_timer_sync(&tp->timer);
15970
15971         /* Make sure that the reset task doesn't run */
15972         tg3_reset_task_cancel(tp);
15973         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15974
15975         netif_device_detach(netdev);
15976
15977         /* Clean up software state, even if MMIO is blocked */
15978         tg3_full_lock(tp, 0);
15979         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15980         tg3_full_unlock(tp);
15981
15982 done:
15983         if (state == pci_channel_io_perm_failure)
15984                 err = PCI_ERS_RESULT_DISCONNECT;
15985         else
15986                 pci_disable_device(pdev);
15987
15988         rtnl_unlock();
15989
15990         return err;
15991 }
15992
15993 /**
15994  * tg3_io_slot_reset - called after the pci bus has been reset.
15995  * @pdev: Pointer to PCI device
15996  *
15997  * Restart the card from scratch, as if from a cold-boot.
15998  * At this point, the card has experienced a hard reset,
15999  * followed by fixups by the BIOS, and has its config space
16000  * set up identically to what it was at cold boot.
16001  */
16002 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
16003 {
16004         struct net_device *netdev = pci_get_drvdata(pdev);
16005         struct tg3 *tp = netdev_priv(netdev);
16006         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
16007         int err;
16008
16009         rtnl_lock();
16010
16011         if (pci_enable_device(pdev)) {
16012                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
16013                 goto done;
16014         }
16015
16016         pci_set_master(pdev);
16017         pci_restore_state(pdev);
16018         pci_save_state(pdev);
16019
16020         if (!netif_running(netdev)) {
16021                 rc = PCI_ERS_RESULT_RECOVERED;
16022                 goto done;
16023         }
16024
16025         err = tg3_power_up(tp);
16026         if (err)
16027                 goto done;
16028
16029         rc = PCI_ERS_RESULT_RECOVERED;
16030
16031 done:
16032         rtnl_unlock();
16033
16034         return rc;
16035 }
16036
16037 /**
16038  * tg3_io_resume - called when traffic can start flowing again.
16039  * @pdev: Pointer to PCI device
16040  *
16041  * This callback is called when the error recovery driver tells
16042  * us that it's OK to resume normal operation.
16043  */
16044 static void tg3_io_resume(struct pci_dev *pdev)
16045 {
16046         struct net_device *netdev = pci_get_drvdata(pdev);
16047         struct tg3 *tp = netdev_priv(netdev);
16048         int err;
16049
16050         rtnl_lock();
16051
16052         if (!netif_running(netdev))
16053                 goto done;
16054
16055         tg3_full_lock(tp, 0);
16056         tg3_flag_set(tp, INIT_COMPLETE);
16057         err = tg3_restart_hw(tp, 1);
16058         tg3_full_unlock(tp);
16059         if (err) {
16060                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
16061                 goto done;
16062         }
16063
16064         netif_device_attach(netdev);
16065
16066         tp->timer.expires = jiffies + tp->timer_offset;
16067         add_timer(&tp->timer);
16068
16069         tg3_netif_start(tp);
16070
16071         tg3_phy_start(tp);
16072
16073 done:
16074         rtnl_unlock();
16075 }
16076
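/* AER recovery flow: error_detected() quiesces the device and software
 * state, slot_reset() re-enables the device and restores PCI state
 * after the bus reset, and resume() restarts the hardware and traffic.
 */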
16077 static struct pci_error_handlers tg3_err_handler = {
16078         .error_detected = tg3_io_error_detected,
16079         .slot_reset     = tg3_io_slot_reset,
16080         .resume         = tg3_io_resume
16081 };
16082
16083 static struct pci_driver tg3_driver = {
16084         .name           = DRV_MODULE_NAME,
16085         .id_table       = tg3_pci_tbl,
16086         .probe          = tg3_init_one,
16087         .remove         = __devexit_p(tg3_remove_one),
16088         .err_handler    = &tg3_err_handler,
16089         .driver.pm      = TG3_PM_OPS,
16090 };
16091
16092 static int __init tg3_init(void)
16093 {
16094         return pci_register_driver(&tg3_driver);
16095 }
16096
16097 static void __exit tg3_cleanup(void)
16098 {
16099         pci_unregister_driver(&tg3_driver);
16100 }
16101
16102 module_init(tg3_init);
16103 module_exit(tg3_cleanup);